[clang] 68bc6d7 - [RISCV] Remove Zvamo Extension

Shao-Ce SUN via cfe-commits cfe-commits at lists.llvm.org
Sun Dec 19 18:28:52 PST 2021


Author: Shao-Ce SUN
Date: 2021-12-20T10:28:39+08:00
New Revision: 68bc6d7cae6d5b8679fcf84c73c114409b6cb469

URL: https://github.com/llvm/llvm-project/commit/68bc6d7cae6d5b8679fcf84c73c114409b6cb469
DIFF: https://github.com/llvm/llvm-project/commit/68bc6d7cae6d5b8679fcf84c73c114409b6cb469.diff

LOG: [RISCV] Remove Zvamo Extension

Based on D111692. Zvamo is not part of the 1.0 V spec, so remove it.

Reviewed By: arcbbb

Differential Revision: https://reviews.llvm.org/D115709
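
As a quick sanity check outside of the updated driver and preprocessor
tests, the effect of dropping the "zvamo" entry from
SupportedExperimentalExtensions can be observed through ISA-string
parsing. The following is a minimal sketch, not part of this commit; it
assumes the RISCVISAInfo::parseArchString interface declared in
llvm/Support/RISCVISAInfo.h at this revision, and the exact diagnostic
wording is not asserted.

  // Sketch only (not from this commit): assumes the parseArchString
  // interface from llvm/Support/RISCVISAInfo.h at this revision.
  #include "llvm/Support/Error.h"
  #include "llvm/Support/RISCVISAInfo.h"
  #include "llvm/Support/raw_ostream.h"

  int main() {
    // With "zvamo" removed from SupportedExperimentalExtensions, an arch
    // string that requests it should now fail to parse, even with
    // experimental extensions enabled.
    auto Res = llvm::RISCVISAInfo::parseArchString(
        "rv32izvamo0p10", /*EnableExperimentalExtension=*/true);
    if (!Res) {
      llvm::errs() << "rejected: " << llvm::toString(Res.takeError()) << "\n";
      return 0;
    }
    llvm::errs() << "unexpectedly accepted\n";
    return 1;
  }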

Added: 
    

Modified: 
    clang/test/Driver/riscv-arch.c
    clang/test/Preprocessor/riscv-target-features.c
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Support/RISCVISAInfo.cpp
    llvm/lib/Target/RISCV/RISCV.td
    llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVSchedRocket.td
    llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
    llvm/lib/Target/RISCV/RISCVSubtarget.h
    llvm/test/CodeGen/RISCV/attributes.ll
    llvm/test/MC/RISCV/attribute-arch-invalid.s
    llvm/test/MC/RISCV/attribute-arch.s

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
    llvm/test/MC/RISCV/rvv/zvamo.s


################################################################################
diff  --git a/clang/test/Driver/riscv-arch.c b/clang/test/Driver/riscv-arch.c
index 5b99643309a57..8b590a7e5b4b5 100644
--- a/clang/test/Driver/riscv-arch.c
+++ b/clang/test/Driver/riscv-arch.c
@@ -435,25 +435,6 @@
 // RUN: -fsyntax-only 2>&1 | FileCheck -check-prefix=RV32-EXPERIMENTAL-ZFHMIN %s
 // RV32-EXPERIMENTAL-ZFHMIN: "-target-feature" "+experimental-zfhmin"
 
-// RUN: %clang -target riscv32-unknown-elf -march=rv32izvamo -### %s -c 2>&1 | \
-// RUN:   FileCheck -check-prefix=RV32-EXPERIMENTAL-ZVAMO-NOFLAG %s
-// RV32-EXPERIMENTAL-ZVAMO-NOFLAG: error: invalid arch name 'rv32izvamo'
-// RV32-EXPERIMENTAL-ZVAMO-NOFLAG: requires '-menable-experimental-extensions'
-
-// RUN: %clang -target riscv32-unknown-elf -march=rv32izvamo -menable-experimental-extensions -### %s -c 2>&1 | \
-// RUN:   FileCheck -check-prefix=RV32-EXPERIMENTAL-ZVAMO-NOVERS %s
-// RV32-EXPERIMENTAL-ZVAMO-NOVERS: error: invalid arch name 'rv32izvamo'
-// RV32-EXPERIMENTAL-ZVAMO-NOVERS: experimental extension requires explicit version number
-
-// RUN: %clang -target riscv32-unknown-elf -march=rv32izvamo0p1 -menable-experimental-extensions -### %s -c 2>&1 | \
-// RUN:   FileCheck -check-prefix=RV32-EXPERIMENTAL-ZVAMO-BADVERS %s
-// RV32-EXPERIMENTAL-ZVAMO-BADVERS: error: invalid arch name 'rv32izvamo0p1'
-// RV32-EXPERIMENTAL-ZVAMO-BADVERS: unsupported version number 0.1 for experimental extension 'zvamo'
-
-// RUN: %clang -target riscv32-unknown-elf -march=rv32izvamo0p10 -menable-experimental-extensions -### %s -c 2>&1 | \
-// RUN:   FileCheck -check-prefix=RV32-EXPERIMENTAL-ZVAMO-GOODVERS %s
-// RV32-EXPERIMENTAL-ZVAMO-GOODVERS: "-target-feature" "+experimental-zvamo"
-
 // RUN: %clang -target riscv32-unknown-elf -march=rv32izvlsseg -### %s -c 2>&1 | \
 // RUN:   FileCheck -check-prefix=RV32-EXPERIMENTAL-ZVLSSEG-NOFLAG %s
 // RV32-EXPERIMENTAL-ZVLSSEG-NOFLAG: error: invalid arch name 'rv32izvlsseg'

diff  --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index a0a1ac59cc4cc..16cfe1616d00f 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -31,7 +31,6 @@
 // CHECK-NOT: __riscv_zfh
 // CHECK-NOT: __riscv_v
 // CHECK-NOT: __riscv_vector
-// CHECK-NOT: __riscv_zvamo
 // CHECK-NOT: __riscv_zvlsseg
 
 // RUN: %clang -target riscv32-unknown-linux-gnu -march=rv32im -x c -E -dM %s \
@@ -205,17 +204,6 @@
 // CHECK-V-EXT: __riscv_vector 1
 // CHECK-V-EXT: __riscv_zvlsseg 10000
 
-// RUN: %clang -target riscv32-unknown-linux-gnu -menable-experimental-extensions \
-// RUN: -march=rv32izvamo0p10 -x c -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZVAMO-EXT %s
-// RUN: %clang -target riscv64-unknown-linux-gnu -menable-experimental-extensions \
-// RUN: -march=rv32izvamo0p10 -x c -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZVAMO-EXT %s
-// CHECK-ZVAMO-EXT: __riscv_v 10000
-// CHECK-ZVAMO-EXT: __riscv_vector 1
-// CHECK-ZVAMO-EXT: __riscv_zvamo 10000
-// CHECK-ZVAMO-EXT: __riscv_zvlsseg 10000
-
 // RUN: %clang -target riscv32-unknown-linux-gnu -menable-experimental-extensions \
 // RUN: -march=rv32izfh0p1 -x c -E -dM %s \
 // RUN: -o - | FileCheck --check-prefix=CHECK-ZFH-EXT %s

diff  --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 3ceb347e97bff..747049b1035b5 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -642,20 +642,6 @@ let TargetPrefix = "riscv" in {
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                      LLVMMatchType<2>],
                     [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic;
-  // For atomic operations without mask
-  // Input: (base, index, value, vl)
-  class RISCVAMONoMask
-        : Intrinsic<[llvm_anyvector_ty],
-                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
-                     llvm_anyint_ty],
-                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
-  // For atomic operations with mask
-  // Input: (base, index, value, mask, vl)
-  class RISCVAMOMask
-        : Intrinsic<[llvm_anyvector_ty],
-                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, LLVMMatchType<0>,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
-                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
 
   // For unit stride segment load
   // Input: (pointer, vl)
@@ -930,10 +916,6 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" #NAME :RISCVConversionNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
   }
-  multiclass RISCVAMO {
-    def "int_riscv_" # NAME : RISCVAMONoMask;
-    def "int_riscv_" # NAME # "_mask" : RISCVAMOMask;
-  }
   multiclass RISCVUSSegLoad<int nf> {
     def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
@@ -976,16 +958,6 @@ let TargetPrefix = "riscv" in {
   def int_riscv_vlm : RISCVUSLoad;
   def int_riscv_vsm : RISCVUSStore;
 
-  defm vamoswap : RISCVAMO;
-  defm vamoadd : RISCVAMO;
-  defm vamoxor : RISCVAMO;
-  defm vamoand : RISCVAMO;
-  defm vamoor : RISCVAMO;
-  defm vamomin : RISCVAMO;
-  defm vamomax : RISCVAMO;
-  defm vamominu : RISCVAMO;
-  defm vamomaxu : RISCVAMO;
-
   defm vadd : RISCVBinaryAAX;
   defm vsub : RISCVBinaryAAX;
   defm vrsub : RISCVBinaryAAX;

diff  --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp
index 94929e7e052f1..15a249e6177eb 100644
--- a/llvm/lib/Support/RISCVISAInfo.cpp
+++ b/llvm/lib/Support/RISCVISAInfo.cpp
@@ -61,7 +61,6 @@ static const RISCVSupportedExtension SupportedExperimentalExtensions[] = {
     {"zbs", RISCVExtensionVersion{1, 0}},
     {"zbt", RISCVExtensionVersion{0, 93}},
 
-    {"zvamo", RISCVExtensionVersion{0, 10}},
     {"zvlsseg", RISCVExtensionVersion{0, 10}},
 
     {"zfhmin", RISCVExtensionVersion{0, 1}},
@@ -286,10 +285,6 @@ void RISCVISAInfo::toFeatures(
     if (ExtName == "zvlsseg") {
       Features.push_back("+experimental-v");
       Features.push_back("+experimental-zvlsseg");
-    } else if (ExtName == "zvamo") {
-      Features.push_back("+experimental-v");
-      Features.push_back("+experimental-zvlsseg");
-      Features.push_back("+experimental-zvamo");
     } else if (isExperimentalExtension(ExtName)) {
       Features.push_back(StrAlloc("+experimental-" + ExtName));
     } else {

diff  --git a/llvm/lib/Target/RISCV/RISCV.td b/llvm/lib/Target/RISCV/RISCV.td
index 772a4f8ecd535..6aa915c019290 100644
--- a/llvm/lib/Target/RISCV/RISCV.td
+++ b/llvm/lib/Target/RISCV/RISCV.td
@@ -168,14 +168,6 @@ def HasStdExtZvlsseg : Predicate<"Subtarget->hasStdExtZvlsseg()">,
                                  AssemblerPredicate<(all_of FeatureStdExtZvlsseg),
                                  "'Zvlsseg' (Vector segment load/store instructions)">;
 
-def FeatureStdExtZvamo
-    : SubtargetFeature<"experimental-zvamo", "HasStdExtZvamo", "true",
-                       "'Zvamo' (Vector AMO Operations)",
-                       [FeatureStdExtV]>;
-def HasStdExtZvamo : Predicate<"Subtarget->hasStdExtZvamo()">,
-                               AssemblerPredicate<(all_of FeatureStdExtZvamo),
-                               "'Zvamo' (Vector AMO Operations)">;
-
 def Feature64Bit
     : SubtargetFeature<"64bit", "HasRV64", "true", "Implements RV64">;
 def IsRV64 : Predicate<"Subtarget->is64Bit()">,

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td b/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
index 80f46b73bfd73..69e9d3553b307 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
@@ -45,19 +45,6 @@ def SUMOPUnitStride  : RISCVLSUMOP<0b00000>;
 def SUMOPUnitStrideMask : RISCVLSUMOP<0b01011>;
 def SUMOPUnitStrideWholeReg : RISCVLSUMOP<0b01000>;
 
-class RISCVAMOOP<bits<5> val> {
-  bits<5> Value = val;
-}
-def AMOOPVamoSwap : RISCVAMOOP<0b00001>;
-def AMOOPVamoAdd : RISCVAMOOP<0b00000>;
-def AMOOPVamoXor : RISCVAMOOP<0b00100>;
-def AMOOPVamoAnd : RISCVAMOOP<0b01100>;
-def AMOOPVamoOr : RISCVAMOOP<0b01000>;
-def AMOOPVamoMin : RISCVAMOOP<0b10000>;
-def AMOOPVamoMax : RISCVAMOOP<0b10100>;
-def AMOOPVamoMinu : RISCVAMOOP<0b11000>;
-def AMOOPVamoMaxu : RISCVAMOOP<0b11100>;
-
 class RISCVWidth<bits<4> val> {
   bits<4> Value = val;
 }
@@ -342,22 +329,3 @@ class RVInstVSX<bits<3> nf, bit mew, RISCVMOP mop, bits<3> width,
 
   let Uses = [VTYPE, VL];
 }
-
-class RVInstVAMO<RISCVAMOOP amoop, bits<3> width, dag outs, 
-                 dag ins, string opcodestr, string argstr>
-    : RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
-  bits<5> vs2;
-  bits<5> rs1;
-  bit wd;
-  bit vm;
-
-  let Inst{31-27} = amoop.Value;
-  let Inst{26} = wd;
-  let Inst{25} = vm;
-  let Inst{24-20} = vs2;
-  let Inst{19-15} = rs1;
-  let Inst{14-12} = width;
-  let Opcode = OPC_AMO.Value;
-
-  let Uses = [VTYPE, VL];
-}

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index a906e88c22103..173ae43a08d67 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -338,29 +338,6 @@ class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
                opcodestr, "$vd, $vs2$vm">;
 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
 
-let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in {
-// vamo vd, (rs1), vs2, vd, vm
-class VAMOWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
-    : RVInstVAMO<amoop, width.Value{2-0}, (outs VR:$vd_wd),
-            (ins GPR:$rs1, VR:$vs2, VR:$vd, VMaskOp:$vm),
-            opcodestr, "$vd_wd, (${rs1}), $vs2, $vd$vm"> {
-    let Constraints = "$vd_wd = $vd";
-    let wd = 1;
-    bits<5> vd;
-    let Inst{11-7} = vd;
-}
-
-// vamo x0, (rs1), vs2, vs3, vm
-class VAMONoWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
-    : RVInstVAMO<amoop, width.Value{2-0}, (outs),
-            (ins GPR:$rs1, VR:$vs2, VR:$vs3, VMaskOp:$vm),
-            opcodestr, "x0, (${rs1}), $vs2, $vs3$vm"> {
-    bits<5> vs3;
-    let Inst{11-7} = vs3;
-}
-
-} // hasSideEffects = 0, mayLoad = 1, mayStore = 1
-
 //===----------------------------------------------------------------------===//
 // Combination of instruction classes.
 // Use these multiclasses to define instructions more easily.
@@ -779,11 +756,6 @@ multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
            Sched<[WriteVCompressV, ReadVCompressV, ReadVCompressV]>;
 }
 
-multiclass VAMO<RISCVAMOOP amoop, RISCVWidth width, string opcodestr> {
-  def _WD : VAMOWd<amoop, width, opcodestr>;
-  def _UNWD : VAMONoWd<amoop, width, opcodestr>;
-}
-
 multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
   foreach l = [8, 16, 32, 64] in {
     defvar w = !cast<RISCVWidth>("LSWidth" # l);
@@ -822,7 +794,7 @@ foreach eew = [8, 16, 32, 64] in {
   // Vector Strided Instructions
   def VLSE#eew#_V  : VStridedLoad<w,  "vlse"#eew#".v">, VLSSched<eew>;
   def VSSE#eew#_V  : VStridedStore<w,  "vsse"#eew#".v">, VSSSched<eew>;
-  
+
   // Vector Indexed Instructions
   def VLUXEI#eew#_V :
     VIndexedLoad<MOPLDIndexedUnord, w, "vluxei"#eew#".v">, VLXSched<eew, "U">;
@@ -1469,31 +1441,4 @@ let Predicates = [HasStdExtZvlsseg] in {
   }
 } // Predicates = [HasStdExtZvlsseg]
 
-let Predicates = [HasStdExtZvamo, HasStdExtA] in {
-  foreach eew = [8, 16, 32] in {
-    defvar w = !cast<RISCVWidth>("LSWidth"#eew);
-    defm VAMOSWAPEI#eew : VAMO<AMOOPVamoSwap, w, "vamoswapei"#eew#".v">;
-    defm VAMOADDEI#eew : VAMO<AMOOPVamoAdd, w, "vamoaddei"#eew#".v">;
-    defm VAMOXOREI#eew : VAMO<AMOOPVamoXor, w, "vamoxorei"#eew#".v">;
-    defm VAMOANDEI#eew : VAMO<AMOOPVamoAnd, w, "vamoandei"#eew#".v">;
-    defm VAMOOREI#eew : VAMO<AMOOPVamoOr, w, "vamoorei"#eew#".v">;
-    defm VAMOMINEI#eew : VAMO<AMOOPVamoMin, w, "vamominei"#eew#".v">;
-    defm VAMOMAXEI#eew : VAMO<AMOOPVamoMax, w, "vamomaxei"#eew#".v">;
-    defm VAMOMINUEI#eew : VAMO<AMOOPVamoMinu, w, "vamominuei"#eew#".v">;
-    defm VAMOMAXUEI#eew : VAMO<AMOOPVamoMaxu, w, "vamomaxuei"#eew#".v">;
-  }
-} // Predicates = [HasStdExtZvamo, HasStdExtA]
-
-let Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] in {
-  defm VAMOSWAPEI64 : VAMO<AMOOPVamoSwap, LSWidth64, "vamoswapei64.v">;
-  defm VAMOADDEI64 : VAMO<AMOOPVamoAdd, LSWidth64, "vamoaddei64.v">;
-  defm VAMOXOREI64 : VAMO<AMOOPVamoXor, LSWidth64, "vamoxorei64.v">;
-  defm VAMOANDEI64 : VAMO<AMOOPVamoAnd, LSWidth64, "vamoandei64.v">;
-  defm VAMOOREI64 : VAMO<AMOOPVamoOr, LSWidth64, "vamoorei64.v">;
-  defm VAMOMINEI64 : VAMO<AMOOPVamoMin, LSWidth64, "vamominei64.v">;
-  defm VAMOMAXEI64 : VAMO<AMOOPVamoMax, LSWidth64, "vamomaxei64.v">;
-  defm VAMOMINUEI64 : VAMO<AMOOPVamoMinu, LSWidth64, "vamominuei64.v">;
-  defm VAMOMAXUEI64 : VAMO<AMOOPVamoMaxu, LSWidth64, "vamomaxuei64.v">;
-} // Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64]
-
 include "RISCVInstrInfoVPseudos.td"

diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4c6e6655d5ecc..073fa605e0fba 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1124,68 +1124,6 @@ class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoAMOWDNoMask<VReg RetClass,
-                         VReg Op1Class> :
-        Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
-               (ins GPR:$rs1,
-                    Op1Class:$vs2,
-                    GetVRegNoV0<RetClass>.R:$vd,
-                    AVL:$vl, ixlenimm:$sew), []>,
-        RISCVVPseudo {
-  let mayLoad = 1;
-  let mayStore = 1;
-  let hasSideEffects = 1;
-  let Constraints = "$vd_wd = $vd";
-  let HasVLOp = 1;
-  let HasSEWOp = 1;
-  let HasDummyMask = 1;
-  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
-}
-
-class VPseudoAMOWDMask<VReg RetClass,
-                       VReg Op1Class> :
-        Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
-               (ins GPR:$rs1,
-                    Op1Class:$vs2,
-                    GetVRegNoV0<RetClass>.R:$vd,
-                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
-        RISCVVPseudo {
-  let mayLoad = 1;
-  let mayStore = 1;
-  let hasSideEffects = 1;
-  let Constraints = "$vd_wd = $vd";
-  let HasVLOp = 1;
-  let HasSEWOp = 1;
-  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
-}
-
-multiclass VPseudoAMOEI<int eew> {
-  // Standard scalar AMO supports 32, 64, and 128 Mem data bits,
-  // and in the base vector "V" extension, only SEW up to ELEN = max(XLEN, FLEN)
-  // are required to be supported.
-  // therefore only [32, 64] is allowed here.
-  foreach sew = [32, 64] in {
-    foreach lmul = MxSet<sew>.m in {
-      defvar octuple_lmul = lmul.octuple;
-      // Calculate emul = eew * lmul / sew
-      defvar octuple_emul = !srl(!mul(eew, octuple_lmul), log2<sew>.val);
-      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
-        defvar emulMX = octuple_to_str<octuple_emul>.ret;
-        defvar emul= !cast<LMULInfo>("V_" # emulMX);
-        let VLMul = lmul.value in {
-          def "_WD_" # lmul.MX # "_" # emulMX : VPseudoAMOWDNoMask<lmul.vrclass, emul.vrclass>;
-          def "_WD_" # lmul.MX # "_" # emulMX # "_MASK" : VPseudoAMOWDMask<lmul.vrclass, emul.vrclass>;
-        }
-      }
-    }
-  }
-}
-
-multiclass VPseudoAMO {
-  foreach eew = EEWList in
-  defm "EI" # eew : VPseudoAMOEI<eew>;
-}
-
 class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
       Pseudo<(outs RetClass:$rd),
              (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
@@ -2927,42 +2865,6 @@ class VPatTernaryMask<string intrinsic,
                     (mask_type V0),
                     GPR:$vl, sew)>;
 
-class VPatAMOWDNoMask<string intrinsic_name,
-                    string inst,
-                    ValueType result_type,
-                    ValueType op1_type,
-                    int sew,
-                    LMULInfo vlmul,
-                    LMULInfo emul,
-                    VReg op1_reg_class> :
-  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
-                    GPR:$rs1,
-                    (op1_type op1_reg_class:$vs2),
-                    (result_type vlmul.vrclass:$vd),
-                    VLOpFrag)),
-                   (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX)
-                    $rs1, $vs2, $vd,
-                    GPR:$vl, sew)>;
-
-class VPatAMOWDMask<string intrinsic_name,
-                    string inst,
-                    ValueType result_type,
-                    ValueType op1_type,
-                    ValueType mask_type,
-                    int sew,
-                    LMULInfo vlmul,
-                    LMULInfo emul,
-                    VReg op1_reg_class> :
-  Pat<(result_type (!cast<Intrinsic>(intrinsic_name # "_mask")
-                    GPR:$rs1,
-                    (op1_type op1_reg_class:$vs2),
-                    (result_type vlmul.vrclass:$vd),
-                    (mask_type V0),
-                    VLOpFrag)),
-                   (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX # "_MASK")
-                    $rs1, $vs2, $vd,
-                    (mask_type V0), GPR:$vl, sew)>;
-
 multiclass VPatUnaryS_M<string intrinsic_name,
                              string inst>
 {
@@ -3800,44 +3702,6 @@ multiclass VPatConversionVF_WF <string intrinsic, string instruction> {
   }
 }
 
-multiclass VPatAMOWD<string intrinsic,
-                     string inst,
-                     ValueType result_type,
-                     ValueType offset_type,
-                     ValueType mask_type,
-                     int sew,
-                     LMULInfo vlmul,
-                     LMULInfo emul,
-                     VReg op1_reg_class>
-{
-  def : VPatAMOWDNoMask<intrinsic, inst, result_type, offset_type,
-                        sew, vlmul, emul, op1_reg_class>;
-  def : VPatAMOWDMask<intrinsic, inst, result_type, offset_type,
-                      mask_type, sew, vlmul, emul, op1_reg_class>;
-}
-
-multiclass VPatAMOV_WD<string intrinsic,
-                       string inst,
-                       list<VTypeInfo> vtilist> {
-  foreach eew = EEWList in {
-    foreach vti = vtilist in {
-      if !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)) then {
-        defvar octuple_lmul = vti.LMul.octuple;
-        // Calculate emul = eew * lmul / sew
-        defvar octuple_emul = !srl(!mul(eew, octuple_lmul), vti.Log2SEW);
-        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
-          defvar emulMX = octuple_to_str<octuple_emul>.ret;
-          defvar offsetVti = !cast<VTypeInfo>("VI" # eew # emulMX);
-          defvar inst_ei = inst # "EI" # eew;
-          defm : VPatAMOWD<intrinsic, inst_ei,
-                           vti.Vector, offsetVti.Vector,
-                           vti.Mask, vti.Log2SEW, vti.LMul, offsetVti.LMul, offsetVti.RegClass>;
-        }
-      }
-    }
-  }
-}
-
 //===----------------------------------------------------------------------===//
 // Pseudo instructions
 //===----------------------------------------------------------------------===//
@@ -3965,19 +3829,6 @@ defm PseudoVSUXSEG : VPseudoISegStore</*Ordered=*/false>;
 let hasSideEffects = 1, Defs = [VL] in
 defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/true>;
 
-//===----------------------------------------------------------------------===//
-// 8. Vector AMO Operations
-//===----------------------------------------------------------------------===//
-defm PseudoVAMOSWAP : VPseudoAMO;
-defm PseudoVAMOADD : VPseudoAMO;
-defm PseudoVAMOXOR : VPseudoAMO;
-defm PseudoVAMOAND : VPseudoAMO;
-defm PseudoVAMOOR : VPseudoAMO;
-defm PseudoVAMOMIN : VPseudoAMO;
-defm PseudoVAMOMAX : VPseudoAMO;
-defm PseudoVAMOMINU : VPseudoAMO;
-defm PseudoVAMOMAXU : VPseudoAMO;
-
 //===----------------------------------------------------------------------===//
 // 12. Vector Integer Arithmetic Instructions
 //===----------------------------------------------------------------------===//
@@ -4533,25 +4384,6 @@ defm PseudoVCOMPRESS : VPseudoVCPR_V;
 // Patterns.
 //===----------------------------------------------------------------------===//
 
-//===----------------------------------------------------------------------===//
-// 8. Vector AMO Operations
-//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtZvamo] in {
-  defm : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllIntegerVectors>;
-  defm : VPatAMOV_WD<"int_riscv_vamoadd", "PseudoVAMOADD", AllIntegerVectors>;
-  defm : VPatAMOV_WD<"int_riscv_vamoxor", "PseudoVAMOXOR", AllIntegerVectors>;
-  defm : VPatAMOV_WD<"int_riscv_vamoand", "PseudoVAMOAND", AllIntegerVectors>;
-  defm : VPatAMOV_WD<"int_riscv_vamoor", "PseudoVAMOOR", AllIntegerVectors>;
-  defm : VPatAMOV_WD<"int_riscv_vamomin", "PseudoVAMOMIN", AllIntegerVectors>;
-  defm : VPatAMOV_WD<"int_riscv_vamomax", "PseudoVAMOMAX", AllIntegerVectors>;
-  defm : VPatAMOV_WD<"int_riscv_vamominu", "PseudoVAMOMINU", AllIntegerVectors>;
-  defm : VPatAMOV_WD<"int_riscv_vamomaxu", "PseudoVAMOMAXU", AllIntegerVectors>;
-} // Predicates = [HasStdExtZvamo]
-
-let Predicates = [HasStdExtZvamo, HasVInstructionsAnyF] in {
-  defm : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllFloatVectors>;
-} // Predicates = [HasStdExtZvamo, HasVInstructionsAnyF]
-
 //===----------------------------------------------------------------------===//
 // 12. Vector Integer Arithmetic Instructions
 //===----------------------------------------------------------------------===//

diff  --git a/llvm/lib/Target/RISCV/RISCVSchedRocket.td b/llvm/lib/Target/RISCV/RISCVSchedRocket.td
index b24ea4ade3e6f..d5a0932c8778d 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedRocket.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedRocket.td
@@ -17,7 +17,7 @@ def RocketModel : SchedMachineModel {
   let LoadLatency = 3;
   let MispredictPenalty = 3;
   let CompleteModel = false;
-  let UnsupportedFeatures = [HasStdExtV, HasStdExtZvamo, HasStdExtZvlsseg];
+  let UnsupportedFeatures = [HasStdExtV, HasStdExtZvlsseg];
 }
 
 //===----------------------------------------------------------------------===//

diff  --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index 5b435fcb16a2f..7f9d0aabc4ed1 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -15,7 +15,7 @@ def SiFive7Model : SchedMachineModel {
   let LoadLatency = 3;
   let MispredictPenalty = 3;
   let CompleteModel = 0;
-  let UnsupportedFeatures = [HasStdExtV, HasStdExtZvamo, HasStdExtZvlsseg];
+  let UnsupportedFeatures = [HasStdExtV, HasStdExtZvlsseg];
 }
 
 // The SiFive7 microarchitecture has two pipelines: A and B.

diff  --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index deb2a11f98f10..d0330e6984a5e 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -51,7 +51,6 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool HasStdExtZbt = false;
   bool HasStdExtV = false;
   bool HasStdExtZvlsseg = false;
-  bool HasStdExtZvamo = false;
   bool HasStdExtZfhmin = false;
   bool HasStdExtZfh = false;
   bool HasRV64 = false;
@@ -118,7 +117,6 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool hasStdExtZbt() const { return HasStdExtZbt; }
   bool hasStdExtV() const { return HasStdExtV; }
   bool hasStdExtZvlsseg() const { return HasStdExtZvlsseg; }
-  bool hasStdExtZvamo() const { return HasStdExtZvamo; }
   bool hasStdExtZfhmin() const { return HasStdExtZfhmin; }
   bool hasStdExtZfh() const { return HasStdExtZfh; }
   bool is64Bit() const { return HasRV64; }

diff  --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index c0944a533762a..828b8fe72ad02 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -5,7 +5,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+f %s -o - | FileCheck --check-prefix=RV32F %s
 ; RUN: llc -mtriple=riscv32 -mattr=+d %s -o - | FileCheck --check-prefix=RV32D %s
 ; RUN: llc -mtriple=riscv32 -mattr=+c %s -o - | FileCheck --check-prefix=RV32C %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo,+experimental-zvlsseg %s -o - | FileCheck --check-prefix=RV32V %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zfhmin %s -o - | FileCheck --check-prefix=RV32ZFHMIN %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zfh %s -o - | FileCheck --check-prefix=RV32ZFH %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zba %s -o - | FileCheck --check-prefix=RV32ZBA %s
@@ -18,13 +17,11 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbr %s -o - | FileCheck --check-prefix=RV32ZBR %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbs %s -o - | FileCheck --check-prefix=RV32ZBS %s
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt %s -o - | FileCheck --check-prefix=RV32ZBT %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbb,+experimental-zfh,+experimental-zvamo,+experimental-v,+f,+experimental-zvlsseg %s -o - | FileCheck --check-prefix=RV32COMBINED %s
 ; RUN: llc -mtriple=riscv64 -mattr=+m %s -o - | FileCheck --check-prefix=RV64M %s
 ; RUN: llc -mtriple=riscv64 -mattr=+a %s -o - | FileCheck --check-prefix=RV64A %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f %s -o - | FileCheck --check-prefix=RV64F %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d %s -o - | FileCheck --check-prefix=RV64D %s
 ; RUN: llc -mtriple=riscv64 -mattr=+c %s -o - | FileCheck --check-prefix=RV64C %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo,+experimental-zvlsseg %s -o - | FileCheck --check-prefix=RV64V %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zfhmin %s -o - | FileCheck --check-prefix=RV64ZFHMIN %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zfh %s -o - | FileCheck --check-prefix=RV64ZFH %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zba %s -o - | FileCheck --check-prefix=RV64ZBA %s
@@ -37,14 +34,13 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbr %s -o - | FileCheck --check-prefix=RV64ZBR %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbs %s -o - | FileCheck --check-prefix=RV64ZBS %s
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt %s -o - | FileCheck --check-prefix=RV64ZBT %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbb,+experimental-zfh,+experimental-zvamo,+experimental-v,+f,+experimental-zvlsseg %s -o - | FileCheck --check-prefix=RV64COMBINED %s
 
 ; RV32M: .attribute 5, "rv32i2p0_m2p0"
 ; RV32A: .attribute 5, "rv32i2p0_a2p0"
 ; RV32F: .attribute 5, "rv32i2p0_f2p0"
 ; RV32D: .attribute 5, "rv32i2p0_f2p0_d2p0"
 ; RV32C: .attribute 5, "rv32i2p0_c2p0"
-; RV32V: .attribute 5, "rv32i2p0_v0p10_zvamo0p10_zvlsseg0p10"
+; RV32V: .attribute 5, "rv32i2p0_v0p10_zvlsseg0p10"
 ; RV32ZFHMIN: .attribute 5, "rv32i2p0_f2p0_zfhmin0p1"
 ; RV32ZFH: .attribute 5, "rv32i2p0_f2p0_zfh0p1_zfhmin0p1"
 ; RV32ZBA: .attribute 5, "rv32i2p0_zba1p0"
@@ -57,7 +53,7 @@
 ; RV32ZBR: .attribute 5, "rv32i2p0_zbr0p93"
 ; RV32ZBS: .attribute 5, "rv32i2p0_zbs1p0"
 ; RV32ZBT: .attribute 5, "rv32i2p0_zbt0p93"
-; RV32COMBINED: .attribute 5, "rv32i2p0_f2p0_v0p10_zfh0p1_zfhmin0p1_zbb1p0_zvamo0p10_zvlsseg0p10"
+; RV32COMBINED: .attribute 5, "rv32i2p0_f2p0_v0p10_zfh0p1_zfhmin0p1_zbb1p0_zvlsseg0p10"
 
 ; RV64M: .attribute 5, "rv64i2p0_m2p0"
 ; RV64A: .attribute 5, "rv64i2p0_a2p0"
@@ -76,8 +72,8 @@
 ; RV64ZBR: .attribute 5, "rv64i2p0_zbr0p93"
 ; RV64ZBS: .attribute 5, "rv64i2p0_zbs1p0"
 ; RV64ZBT: .attribute 5, "rv64i2p0_zbt0p93"
-; RV64V: .attribute 5, "rv64i2p0_v0p10_zvamo0p10_zvlsseg0p10"
-; RV64COMBINED: .attribute 5, "rv64i2p0_f2p0_v0p10_zfh0p1_zfhmin0p1_zbb1p0_zvamo0p10_zvlsseg0p10"
+; RV64V: .attribute 5, "rv64i2p0_v0p10_zvlsseg0p10"
+; RV64COMBINED: .attribute 5, "rv64i2p0_f2p0_v0p10_zfh0p1_zfhmin0p1_zbb1p0_zvlsseg0p10"
 
 
 define i32 @addi(i32 %a) {

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
deleted file mode 100644
index 4f8bda92648b6..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
deleted file mode 100644
index d70f14def776e..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoaddei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
deleted file mode 100644
index 32c8e44f98eda..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
deleted file mode 100644
index 7e0d2211136c3..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoand_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoandei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoand_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoandei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoand_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoandei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoand_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoandei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
deleted file mode 100644
index e3e3d2597ce03..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
deleted file mode 100644
index 309d79b508b7f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomax_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomax_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomax_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomax_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomax_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomax_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
deleted file mode 100644
index 8c455c9e84728..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
deleted file mode 100644
index 5a1e95d83435a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamomaxuei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
deleted file mode 100644
index 40ff958ff16c1..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
deleted file mode 100644
index 1b6f0ede8f77d..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamomin_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamomin_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamomin_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamomin_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamomin_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamomin_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamomin_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamomin_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamomin_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
deleted file mode 100644
index 15311234d4091..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
deleted file mode 100644
index e0ef867ae548e..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamominu_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamominu_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamominu_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamominu_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamominu_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamominu_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamominuei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamominu_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamominuei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamominu_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamominuei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamominu_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamominuei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
deleted file mode 100644
index ccaff9b5eaf66..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
deleted file mode 100644
index afe46e334158a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoor_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoor_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoor_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoor_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoor_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoorei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoorei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoorei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
deleted file mode 100644
index 12bd2982829df..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll
+++ /dev/null
@@ -1,3362 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i64(<vscale x 1 x float> *%0, <vscale x 1 x i64> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x float> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64(<vscale x 1 x float> *%0, <vscale x 1 x i64> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i64(<vscale x 2 x float> *%0, <vscale x 2 x i64> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64(<vscale x 2 x float> *%0, <vscale x 2 x i64> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i64(<vscale x 4 x float> *%0, <vscale x 4 x i64> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x float> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64(<vscale x 4 x float> *%0, <vscale x 4 x i64> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i64(<vscale x 8 x float> *%0, <vscale x 8 x i64> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x float> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64(<vscale x 8 x float> *%0, <vscale x 8 x i64> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i64(<vscale x 1 x double> *%0, <vscale x 1 x i64> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64(<vscale x 1 x double> *%0, <vscale x 1 x i64> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i64(<vscale x 2 x double> *%0, <vscale x 2 x i64> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64(<vscale x 2 x double> *%0, <vscale x 2 x i64> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i64(<vscale x 4 x double> *%0, <vscale x 4 x i64> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64(<vscale x 4 x double> *%0, <vscale x 4 x i64> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i64(<vscale x 8 x double> *%0, <vscale x 8 x i64> %1, <vscale x 8 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x double> %2,
-    i32 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64(<vscale x 8 x double> *%0, <vscale x 8 x i64> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i32(<vscale x 1 x float> *%0, <vscale x 1 x i32> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x float> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32(<vscale x 1 x float> *%0, <vscale x 1 x i32> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i32(<vscale x 2 x float> *%0, <vscale x 2 x i32> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32(<vscale x 2 x float> *%0, <vscale x 2 x i32> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i32(<vscale x 4 x float> *%0, <vscale x 4 x i32> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x float> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32(<vscale x 4 x float> *%0, <vscale x 4 x i32> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i32(<vscale x 8 x float> *%0, <vscale x 8 x i32> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x float> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32(<vscale x 8 x float> *%0, <vscale x 8 x i32> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x float>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i32(<vscale x 16 x float> *%0, <vscale x 16 x i32> %1, <vscale x 16 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x float> %2,
-    i32 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32(<vscale x 16 x float> *%0, <vscale x 16 x i32> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i32(<vscale x 1 x double> *%0, <vscale x 1 x i32> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32(<vscale x 1 x double> *%0, <vscale x 1 x i32> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i32(<vscale x 2 x double> *%0, <vscale x 2 x i32> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32(<vscale x 2 x double> *%0, <vscale x 2 x i32> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i32(<vscale x 4 x double> *%0, <vscale x 4 x i32> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32(<vscale x 4 x double> *%0, <vscale x 4 x i32> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i32(<vscale x 8 x double> *%0, <vscale x 8 x i32> %1, <vscale x 8 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x double> %2,
-    i32 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32(<vscale x 8 x double> *%0, <vscale x 8 x i32> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i16(<vscale x 1 x float> *%0, <vscale x 1 x i16> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x float> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16(<vscale x 1 x float> *%0, <vscale x 1 x i16> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i16(<vscale x 2 x float> *%0, <vscale x 2 x i16> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16(<vscale x 2 x float> *%0, <vscale x 2 x i16> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i16(<vscale x 4 x float> *%0, <vscale x 4 x i16> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x float> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16(<vscale x 4 x float> *%0, <vscale x 4 x i16> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i16(<vscale x 8 x float> *%0, <vscale x 8 x i16> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x float> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16(<vscale x 8 x float> *%0, <vscale x 8 x i16> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x float>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i16(<vscale x 16 x float> *%0, <vscale x 16 x i16> %1, <vscale x 16 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x float> %2,
-    i32 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16(<vscale x 16 x float> *%0, <vscale x 16 x i16> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i16(<vscale x 1 x double> *%0, <vscale x 1 x i16> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16(<vscale x 1 x double> *%0, <vscale x 1 x i16> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i16(<vscale x 2 x double> *%0, <vscale x 2 x i16> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16(<vscale x 2 x double> *%0, <vscale x 2 x i16> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i16(<vscale x 4 x double> *%0, <vscale x 4 x i16> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16(<vscale x 4 x double> *%0, <vscale x 4 x i16> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i16(<vscale x 8 x double> *%0, <vscale x 8 x i16> %1, <vscale x 8 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x double> %2,
-    i32 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16(<vscale x 8 x double> *%0, <vscale x 8 x i16> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x float>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i8(<vscale x 1 x float> *%0, <vscale x 1 x i8> %1, <vscale x 1 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x float> %2,
-    i32 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8(<vscale x 1 x float> *%0, <vscale x 1 x i8> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i8(<vscale x 2 x float> *%0, <vscale x 2 x i8> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8(<vscale x 2 x float> *%0, <vscale x 2 x i8> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x float>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i8(<vscale x 4 x float> *%0, <vscale x 4 x i8> %1, <vscale x 4 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x float> %2,
-    i32 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8(<vscale x 4 x float> *%0, <vscale x 4 x i8> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x float>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i8(<vscale x 8 x float> *%0, <vscale x 8 x i8> %1, <vscale x 8 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x float> %2,
-    i32 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8(<vscale x 8 x float> *%0, <vscale x 8 x i8> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x float>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i8(<vscale x 16 x float> *%0, <vscale x 16 x i8> %1, <vscale x 16 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x float> %2,
-    i32 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8(<vscale x 16 x float> *%0, <vscale x 16 x i8> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i8(<vscale x 1 x double> *%0, <vscale x 1 x i8> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8(<vscale x 1 x double> *%0, <vscale x 1 x i8> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x double>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i8(<vscale x 2 x double> *%0, <vscale x 2 x i8> %1, <vscale x 2 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x double> %2,
-    i32 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8(<vscale x 2 x double> *%0, <vscale x 2 x i8> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x double>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i8(<vscale x 4 x double> *%0, <vscale x 4 x i8> %1, <vscale x 4 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x double> %2,
-    i32 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8(<vscale x 4 x double> *%0, <vscale x 4 x i8> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x double>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i8(<vscale x 8 x double> *%0, <vscale x 8 x i8> %1, <vscale x 8 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x double> %2,
-    i32 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8(<vscale x 8 x double> *%0, <vscale x 8 x i8> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x double> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
deleted file mode 100644
index ac20713b659be..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll
+++ /dev/null
@@ -1,3362 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i64(<vscale x 1 x float> *%0, <vscale x 1 x i64> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i64(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64(<vscale x 1 x float> *%0, <vscale x 1 x i64> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i64(<vscale x 2 x float> *%0, <vscale x 2 x i64> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i64(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64(<vscale x 2 x float> *%0, <vscale x 2 x i64> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i64(<vscale x 4 x float> *%0, <vscale x 4 x i64> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i64(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64(<vscale x 4 x float> *%0, <vscale x 4 x i64> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i64(<vscale x 8 x float> *%0, <vscale x 8 x i64> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i64(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64(<vscale x 8 x float> *%0, <vscale x 8 x i64> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i64(<vscale x 1 x double> *%0, <vscale x 1 x i64> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i64(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64(<vscale x 1 x double> *%0, <vscale x 1 x i64> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i64(<vscale x 2 x double> *%0, <vscale x 2 x i64> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i64(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64(<vscale x 2 x double> *%0, <vscale x 2 x i64> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i64(<vscale x 4 x double> *%0, <vscale x 4 x i64> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i64(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64(<vscale x 4 x double> *%0, <vscale x 4 x i64> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i64(<vscale x 8 x double> *%0, <vscale x 8 x i64> %1, <vscale x 8 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i64(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x double> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64(<vscale x 8 x double> *%0, <vscale x 8 x i64> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i32(<vscale x 1 x float> *%0, <vscale x 1 x i32> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i32(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32(<vscale x 1 x float> *%0, <vscale x 1 x i32> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i32(<vscale x 2 x float> *%0, <vscale x 2 x i32> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i32(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32(<vscale x 2 x float> *%0, <vscale x 2 x i32> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i32(<vscale x 4 x float> *%0, <vscale x 4 x i32> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i32(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32(<vscale x 4 x float> *%0, <vscale x 4 x i32> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i32(<vscale x 8 x float> *%0, <vscale x 8 x i32> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i32(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32(<vscale x 8 x float> *%0, <vscale x 8 x i32> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i32(<vscale x 16 x float> *%0, <vscale x 16 x i32> %1, <vscale x 16 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i32(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x float> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32(<vscale x 16 x float> *%0, <vscale x 16 x i32> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i32(<vscale x 1 x double> *%0, <vscale x 1 x i32> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i32(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32(<vscale x 1 x double> *%0, <vscale x 1 x i32> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i32(<vscale x 2 x double> *%0, <vscale x 2 x i32> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i32(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32(<vscale x 2 x double> *%0, <vscale x 2 x i32> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i32(<vscale x 4 x double> *%0, <vscale x 4 x i32> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i32(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32(<vscale x 4 x double> *%0, <vscale x 4 x i32> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i32(<vscale x 8 x double> *%0, <vscale x 8 x i32> %1, <vscale x 8 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i32(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x double> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32(<vscale x 8 x double> *%0, <vscale x 8 x i32> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i16(<vscale x 1 x float> *%0, <vscale x 1 x i16> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16(<vscale x 1 x float> *%0, <vscale x 1 x i16> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i16(<vscale x 2 x float> *%0, <vscale x 2 x i16> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i16(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16(<vscale x 2 x float> *%0, <vscale x 2 x i16> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i16(<vscale x 4 x float> *%0, <vscale x 4 x i16> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i16(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16(<vscale x 4 x float> *%0, <vscale x 4 x i16> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i16(<vscale x 8 x float> *%0, <vscale x 8 x i16> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i16(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16(<vscale x 8 x float> *%0, <vscale x 8 x i16> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i16(<vscale x 16 x float> *%0, <vscale x 16 x i16> %1, <vscale x 16 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i16(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x float> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16(<vscale x 16 x float> *%0, <vscale x 16 x i16> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i16(<vscale x 1 x double> *%0, <vscale x 1 x i16> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i16(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16(<vscale x 1 x double> *%0, <vscale x 1 x i16> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i16(<vscale x 2 x double> *%0, <vscale x 2 x i16> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i16(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16(<vscale x 2 x double> *%0, <vscale x 2 x i16> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i16(<vscale x 4 x double> *%0, <vscale x 4 x i16> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i16(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16(<vscale x 4 x double> *%0, <vscale x 4 x i16> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i16(<vscale x 8 x double> *%0, <vscale x 8 x i16> %1, <vscale x 8 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i16(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x double> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16(<vscale x 8 x double> *%0, <vscale x 8 x i16> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x float>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_v_nxv1f32_nxv1i8(<vscale x 1 x float> *%0, <vscale x 1 x i8> %1, <vscale x 1 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i8(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x float> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8(<vscale x 1 x float> *%0, <vscale x 1 x i8> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8(
-    <vscale x 1 x float> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_v_nxv2f32_nxv2i8(<vscale x 2 x float> *%0, <vscale x 2 x i8> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.nxv2f32.nxv2i8(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8(<vscale x 2 x float> *%0, <vscale x 2 x i8> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8(
-    <vscale x 2 x float> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x float>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_v_nxv4f32_nxv4i8(<vscale x 4 x float> *%0, <vscale x 4 x i8> %1, <vscale x 4 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.nxv4f32.nxv4i8(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x float> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8(<vscale x 4 x float> *%0, <vscale x 4 x i8> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8(
-    <vscale x 4 x float> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x float>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_v_nxv8f32_nxv8i8(<vscale x 8 x float> *%0, <vscale x 8 x i8> %1, <vscale x 8 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.nxv8f32.nxv8i8(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x float> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8(<vscale x 8 x float> *%0, <vscale x 8 x i8> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8(
-    <vscale x 8 x float> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x float>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_v_nxv16f32_nxv16i8(<vscale x 16 x float> *%0, <vscale x 16 x i8> %1, <vscale x 16 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.nxv16f32.nxv16i8(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x float> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8(<vscale x 16 x float> *%0, <vscale x 16 x i8> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8(
-    <vscale x 16 x float> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_v_nxv1f64_nxv1i8(<vscale x 1 x double> *%0, <vscale x 1 x i8> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.nxv1f64.nxv1i8(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8(<vscale x 1 x double> *%0, <vscale x 1 x i8> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8(
-    <vscale x 1 x double> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x double>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_v_nxv2f64_nxv2i8(<vscale x 2 x double> *%0, <vscale x 2 x i8> %1, <vscale x 2 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.nxv2f64.nxv2i8(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x double> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8(<vscale x 2 x double> *%0, <vscale x 2 x i8> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8(
-    <vscale x 2 x double> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x double>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_v_nxv4f64_nxv4i8(<vscale x 4 x double> *%0, <vscale x 4 x i8> %1, <vscale x 4 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.nxv4f64.nxv4i8(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x double> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8(<vscale x 4 x double> *%0, <vscale x 4 x i8> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8(
-    <vscale x 4 x double> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x double>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_v_nxv8f64_nxv8i8(<vscale x 8 x double> *%0, <vscale x 8 x i8> %1, <vscale x 8 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.nxv8f64.nxv8i8(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x double> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8(<vscale x 8 x double> *%0, <vscale x 8 x i8> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoswapei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8(
-    <vscale x 8 x double> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
deleted file mode 100644
index f33cf2322321a..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i32 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
deleted file mode 100644
index c641d091ffedd..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll
+++ /dev/null
@@ -1,1682 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zvamo -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64(<vscale x 2 x i32> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64(<vscale x 4 x i32> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64(<vscale x 8 x i32> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> *%0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> *%0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> *%0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei64.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> *%0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32(<vscale x 1 x i64> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32(<vscale x 2 x i64> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei32.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei16.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> *%0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v9, (a0), v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> *%0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v10, (a0), v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> *%0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v12, (a0), v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> *%0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vamoxorei8.v v16, (a0), v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> *%0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/MC/RISCV/attribute-arch-invalid.s b/llvm/test/MC/RISCV/attribute-arch-invalid.s
index 73bed8580902b..520df66310df3 100644
--- a/llvm/test/MC/RISCV/attribute-arch-invalid.s
+++ b/llvm/test/MC/RISCV/attribute-arch-invalid.s
@@ -44,5 +44,5 @@
 .attribute arch, "rv32ifzfh"
 # CHECK: error: invalid arch name 'rv32ifzfh', experimental extension requires explicit version number `zfh`
 
-.attribute arch, "rv32ivzvamo_zvlsseg"
-# CHECK: error: invalid arch name 'rv32ivzvamo_zvlsseg', experimental extension requires explicit version number `v`
+.attribute arch, "rv32ivzvlsseg"
+# CHECK: error: invalid arch name 'rv32ivzvlsseg', experimental extension requires explicit version number `v`

diff --git a/llvm/test/MC/RISCV/attribute-arch.s b/llvm/test/MC/RISCV/attribute-arch.s
index 2928222b64fff..f543564ba39d0 100644
--- a/llvm/test/MC/RISCV/attribute-arch.s
+++ b/llvm/test/MC/RISCV/attribute-arch.s
@@ -74,5 +74,5 @@
 .attribute arch, "rv32ifzfh0p1"
 # CHECK: attribute      5, "rv32i2p0_f2p0_zfh0p1_zfhmin0p1"
 
-.attribute arch, "rv32iv0p10zvamo0p10_zvlsseg0p10"
-# CHECK: attribute      5, "rv32i2p0_v0p10_zvamo0p10_zvlsseg0p10"
+.attribute arch, "rv32iv0p10zvlsseg0p10"
+# CHECK: attribute      5, "rv32i2p0_v0p10_zvlsseg0p10"

diff --git a/llvm/test/MC/RISCV/rvv/zvamo.s b/llvm/test/MC/RISCV/rvv/zvamo.s
deleted file mode 100644
index b7264879da5cd..0000000000000
--- a/llvm/test/MC/RISCV/rvv/zvamo.s
+++ /dev/null
@@ -1,874 +0,0 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+a,+experimental-zvamo %s \
-# RUN:        | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
-# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+a,+experimental-zvamo %s \
-# RUN:        | llvm-objdump -d --mattr=+a,+experimental-zvamo - \
-# RUN:        | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+a,+experimental-zvamo %s \
-# RUN:        | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
-
-
-vamoswapei8.v v8, (a0), v4, v8
-# CHECK-INST: vamoswapei8.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x0e]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 0e <unknown>
-
-vamoswapei16.v v8, (a0), v4, v8
-# CHECK-INST: vamoswapei16.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x0e]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 0e <unknown>
-
-vamoswapei32.v v8, (a0), v4, v8
-# CHECK-INST: vamoswapei32.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x0e]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 0e <unknown>
-
-vamoswapei64.v v8, (a0), v4, v8
-# CHECK-INST: vamoswapei64.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x0e]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 0e <unknown>
-
-vamoswapei8.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoswapei8.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x0c]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 0c <unknown>
-
-vamoswapei16.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoswapei16.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x0c]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 0c <unknown>
-
-vamoswapei32.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoswapei32.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x0c]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 0c <unknown>
-
-vamoswapei64.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoswapei64.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x0c]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 0c <unknown>
-
-vamoaddei8.v v8, (a0), v4, v8
-# CHECK-INST: vamoaddei8.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x06]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 06 <unknown>
-
-vamoaddei16.v v8, (a0), v4, v8
-# CHECK-INST: vamoaddei16.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x06]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 06 <unknown>
-
-vamoaddei32.v v8, (a0), v4, v8
-# CHECK-INST: vamoaddei32.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x06]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 06 <unknown>
-
-vamoaddei64.v v8, (a0), v4, v8
-# CHECK-INST: vamoaddei64.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x06]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 06 <unknown>
-
-vamoaddei8.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoaddei8.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x04]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 04 <unknown>
-
-vamoaddei16.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoaddei16.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x04]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 04 <unknown>
-
-vamoaddei32.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoaddei32.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x04]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 04 <unknown>
-
-vamoaddei64.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoaddei64.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x04]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 04 <unknown>
-
-vamoxorei8.v v8, (a0), v4, v8
-# CHECK-INST: vamoxorei8.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x26]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 26 <unknown>
-
-vamoxorei16.v v8, (a0), v4, v8
-# CHECK-INST: vamoxorei16.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x26]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 26 <unknown>
-
-vamoxorei32.v v8, (a0), v4, v8
-# CHECK-INST: vamoxorei32.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x26]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 26 <unknown>
-
-vamoxorei64.v v8, (a0), v4, v8
-# CHECK-INST: vamoxorei64.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x26]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 26 <unknown>
-
-vamoxorei8.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoxorei8.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x24]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 24 <unknown>
-
-vamoxorei16.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoxorei16.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x24]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 24 <unknown>
-
-vamoxorei32.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoxorei32.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x24]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 24 <unknown>
-
-vamoxorei64.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoxorei64.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x24]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 24 <unknown>
-
-vamoandei8.v v8, (a0), v4, v8
-# CHECK-INST: vamoandei8.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x66]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 66 <unknown>
-
-vamoandei16.v v8, (a0), v4, v8
-# CHECK-INST: vamoandei16.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x66]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 66 <unknown>
-
-vamoandei32.v v8, (a0), v4, v8
-# CHECK-INST: vamoandei32.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x66]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 66 <unknown>
-
-vamoandei64.v v8, (a0), v4, v8
-# CHECK-INST: vamoandei64.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x66]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 66 <unknown>
-
-vamoandei8.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoandei8.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x64]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 64 <unknown>
-
-vamoandei16.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoandei16.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x64]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 64 <unknown>
-
-vamoandei32.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoandei32.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x64]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 64 <unknown>
-
-vamoandei64.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoandei64.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x64]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 64 <unknown>
-
-vamoorei8.v v8, (a0), v4, v8
-# CHECK-INST: vamoorei8.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x46]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 46 <unknown>
-
-vamoorei16.v v8, (a0), v4, v8
-# CHECK-INST: vamoorei16.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x46]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 46 <unknown>
-
-vamoorei32.v v8, (a0), v4, v8
-# CHECK-INST: vamoorei32.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x46]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 46 <unknown>
-
-vamoorei64.v v8, (a0), v4, v8
-# CHECK-INST: vamoorei64.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x46]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 46 <unknown>
-
-vamoorei8.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoorei8.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x44]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 44 <unknown>
-
-vamoorei16.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoorei16.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x44]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 44 <unknown>
-
-vamoorei32.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoorei32.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x44]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 44 <unknown>
-
-vamoorei64.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamoorei64.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x44]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 44 <unknown>
-
-vamominei8.v v8, (a0), v4, v8
-# CHECK-INST: vamominei8.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x86]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 86 <unknown>
-
-vamominei16.v v8, (a0), v4, v8
-# CHECK-INST: vamominei16.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x86]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 86 <unknown>
-
-vamominei32.v v8, (a0), v4, v8
-# CHECK-INST: vamominei32.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x86]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 86 <unknown>
-
-vamominei64.v v8, (a0), v4, v8
-# CHECK-INST: vamominei64.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x86]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 86 <unknown>
-
-vamominei8.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamominei8.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x04,0x45,0x84]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 84 <unknown>
-
-vamominei16.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamominei16.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x54,0x45,0x84]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 84 <unknown>
-
-vamominei32.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamominei32.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x64,0x45,0x84]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 84 <unknown>
-
-vamominei64.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamominei64.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x74,0x45,0x84]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 84 <unknown>
-
-vamomaxei8.v v8, (a0), v4, v8
-# CHECK-INST: vamomaxei8.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x04,0x45,0xa6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 a6 <unknown>
-
-vamomaxei16.v v8, (a0), v4, v8
-# CHECK-INST: vamomaxei16.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x54,0x45,0xa6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 a6 <unknown>
-
-vamomaxei32.v v8, (a0), v4, v8
-# CHECK-INST: vamomaxei32.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x64,0x45,0xa6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 a6 <unknown>
-
-vamomaxei64.v v8, (a0), v4, v8
-# CHECK-INST: vamomaxei64.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x74,0x45,0xa6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 a6 <unknown>
-
-vamomaxei8.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamomaxei8.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x04,0x45,0xa4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 a4 <unknown>
-
-vamomaxei16.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamomaxei16.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x54,0x45,0xa4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 a4 <unknown>
-
-vamomaxei32.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamomaxei32.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x64,0x45,0xa4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 a4 <unknown>
-
-vamomaxei64.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamomaxei64.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x74,0x45,0xa4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 a4 <unknown>
-
-vamominuei8.v v8, (a0), v4, v8
-# CHECK-INST: vamominuei8.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x04,0x45,0xc6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 c6 <unknown>
-
-vamominuei16.v v8, (a0), v4, v8
-# CHECK-INST: vamominuei16.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x54,0x45,0xc6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 c6 <unknown>
-
-vamominuei32.v v8, (a0), v4, v8
-# CHECK-INST: vamominuei32.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x64,0x45,0xc6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 c6 <unknown>
-
-vamominuei64.v v8, (a0), v4, v8
-# CHECK-INST: vamominuei64.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x74,0x45,0xc6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 c6 <unknown>
-
-vamominuei8.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamominuei8.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x04,0x45,0xc4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 c4 <unknown>
-
-vamominuei16.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamominuei16.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x54,0x45,0xc4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 c4 <unknown>
-
-vamominuei32.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamominuei32.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x64,0x45,0xc4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 c4 <unknown>
-
-vamominuei64.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamominuei64.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x74,0x45,0xc4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 c4 <unknown>
-
-vamomaxuei8.v v8, (a0), v4, v8
-# CHECK-INST: vamomaxuei8.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x04,0x45,0xe6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 e6 <unknown>
-
-vamomaxuei16.v v8, (a0), v4, v8
-# CHECK-INST: vamomaxuei16.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x54,0x45,0xe6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 e6 <unknown>
-
-vamomaxuei32.v v8, (a0), v4, v8
-# CHECK-INST: vamomaxuei32.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x64,0x45,0xe6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 e6 <unknown>
-
-vamomaxuei64.v v8, (a0), v4, v8
-# CHECK-INST: vamomaxuei64.v v8, (a0), v4, v8
-# CHECK-ENCODING: [0x2f,0x74,0x45,0xe6]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 e6 <unknown>
-
-vamomaxuei8.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamomaxuei8.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x04,0x45,0xe4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 04 45 e4 <unknown>
-
-vamomaxuei16.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamomaxuei16.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x54,0x45,0xe4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 54 45 e4 <unknown>
-
-vamomaxuei32.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamomaxuei32.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x64,0x45,0xe4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 64 45 e4 <unknown>
-
-vamomaxuei64.v v8, (a0), v4, v8, v0.t
-# CHECK-INST: vamomaxuei64.v v8, (a0), v4, v8, v0.t
-# CHECK-ENCODING: [0x2f,0x74,0x45,0xe4]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 74 45 e4 <unknown>
-
-vamoswapei8.v x0, (a0), v4, v24
-# CHECK-INST: vamoswapei8.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x0a]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 0a <unknown>
-
-vamoswapei16.v x0, (a0), v4, v24
-# CHECK-INST: vamoswapei16.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x0a]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 0a <unknown>
-
-vamoswapei32.v x0, (a0), v4, v24
-# CHECK-INST: vamoswapei32.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x0a]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 0a <unknown>
-
-vamoswapei64.v x0, (a0), v4, v24
-# CHECK-INST: vamoswapei64.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x0a]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 0a <unknown>
-
-vamoswapei8.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoswapei8.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x08]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 08 <unknown>
-
-vamoswapei16.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoswapei16.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x08]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 08 <unknown>
-
-vamoswapei32.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoswapei32.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x08]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 08 <unknown>
-
-vamoswapei64.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoswapei64.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x08]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 08 <unknown>
-
-vamoaddei8.v x0, (a0), v4, v24
-# CHECK-INST: vamoaddei8.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x02]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 02 <unknown>
-
-vamoaddei16.v x0, (a0), v4, v24
-# CHECK-INST: vamoaddei16.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x02]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 02 <unknown>
-
-vamoaddei32.v x0, (a0), v4, v24
-# CHECK-INST: vamoaddei32.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x02]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 02 <unknown>
-
-vamoaddei64.v x0, (a0), v4, v24
-# CHECK-INST: vamoaddei64.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x02]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 02 <unknown>
-
-vamoaddei8.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoaddei8.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x00]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 00 <unknown>
-
-vamoaddei16.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoaddei16.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x00]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 00 <unknown>
-
-vamoaddei32.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoaddei32.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x00]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 00 <unknown>
-
-vamoaddei64.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoaddei64.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x00]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 00 <unknown>
-
-vamoxorei8.v x0, (a0), v4, v24
-# CHECK-INST: vamoxorei8.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x22]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 22 <unknown>
-
-vamoxorei16.v x0, (a0), v4, v24
-# CHECK-INST: vamoxorei16.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x22]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 22 <unknown>
-
-vamoxorei32.v x0, (a0), v4, v24
-# CHECK-INST: vamoxorei32.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x22]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 22 <unknown>
-
-vamoxorei64.v x0, (a0), v4, v24
-# CHECK-INST: vamoxorei64.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x22]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 22 <unknown>
-
-vamoxorei8.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoxorei8.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x20]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 20 <unknown>
-
-vamoxorei16.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoxorei16.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x20]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 20 <unknown>
-
-vamoxorei32.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoxorei32.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x20]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 20 <unknown>
-
-vamoxorei64.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoxorei64.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x20]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 20 <unknown>
-
-vamoandei8.v x0, (a0), v4, v24
-# CHECK-INST: vamoandei8.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x62]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 62 <unknown>
-
-vamoandei16.v x0, (a0), v4, v24
-# CHECK-INST: vamoandei16.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x62]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 62 <unknown>
-
-vamoandei32.v x0, (a0), v4, v24
-# CHECK-INST: vamoandei32.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x62]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 62 <unknown>
-
-vamoandei64.v x0, (a0), v4, v24
-# CHECK-INST: vamoandei64.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x62]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 62 <unknown>
-
-vamoandei8.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoandei8.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x60]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 60 <unknown>
-
-vamoandei16.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoandei16.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x60]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 60 <unknown>
-
-vamoandei32.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoandei32.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x60]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 60 <unknown>
-
-vamoandei64.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoandei64.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x60]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 60 <unknown>
-
-vamoorei8.v x0, (a0), v4, v24
-# CHECK-INST: vamoorei8.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x42]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 42 <unknown>
-
-vamoorei16.v x0, (a0), v4, v24
-# CHECK-INST: vamoorei16.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x42]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 42 <unknown>
-
-vamoorei32.v x0, (a0), v4, v24
-# CHECK-INST: vamoorei32.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x42]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 42 <unknown>
-
-vamoorei64.v x0, (a0), v4, v24
-# CHECK-INST: vamoorei64.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x42]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 42 <unknown>
-
-vamoorei8.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoorei8.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x40]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 40 <unknown>
-
-vamoorei16.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoorei16.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x40]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 40 <unknown>
-
-vamoorei32.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoorei32.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x40]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 40 <unknown>
-
-vamoorei64.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamoorei64.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x40]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 40 <unknown>
-
-vamominei8.v x0, (a0), v4, v24
-# CHECK-INST: vamominei8.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x82]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 82 <unknown>
-
-vamominei16.v x0, (a0), v4, v24
-# CHECK-INST: vamominei16.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x82]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 82 <unknown>
-
-vamominei32.v x0, (a0), v4, v24
-# CHECK-INST: vamominei32.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x82]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 82 <unknown>
-
-vamominei64.v x0, (a0), v4, v24
-# CHECK-INST: vamominei64.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x82]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 82 <unknown>
-
-vamominei8.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamominei8.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0x80]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 80 <unknown>
-
-vamominei16.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamominei16.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0x80]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 80 <unknown>
-
-vamominei32.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamominei32.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0x80]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 80 <unknown>
-
-vamominei64.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamominei64.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0x80]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 80 <unknown>
-
-vamomaxei8.v x0, (a0), v4, v24
-# CHECK-INST: vamomaxei8.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0xa2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 a2 <unknown>
-
-vamomaxei16.v x0, (a0), v4, v24
-# CHECK-INST: vamomaxei16.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0xa2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 a2 <unknown>
-
-vamomaxei32.v x0, (a0), v4, v24
-# CHECK-INST: vamomaxei32.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0xa2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 a2 <unknown>
-
-vamomaxei64.v x0, (a0), v4, v24
-# CHECK-INST: vamomaxei64.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0xa2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 a2 <unknown>
-
-vamomaxei8.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamomaxei8.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0xa0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 a0 <unknown>
-
-vamomaxei16.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamomaxei16.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0xa0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 a0 <unknown>
-
-vamomaxei32.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamomaxei32.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0xa0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 a0 <unknown>
-
-vamomaxei64.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamomaxei64.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0xa0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 a0 <unknown>
-
-vamominuei8.v x0, (a0), v4, v24
-# CHECK-INST: vamominuei8.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0xc2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 c2 <unknown>
-
-vamominuei16.v x0, (a0), v4, v24
-# CHECK-INST: vamominuei16.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0xc2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 c2 <unknown>
-
-vamominuei32.v x0, (a0), v4, v24
-# CHECK-INST: vamominuei32.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0xc2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 c2 <unknown>
-
-vamominuei64.v x0, (a0), v4, v24
-# CHECK-INST: vamominuei64.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0xc2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 c2 <unknown>
-
-vamominuei8.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamominuei8.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0xc0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 c0 <unknown>
-
-vamominuei16.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamominuei16.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0xc0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 c0 <unknown>
-
-vamominuei32.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamominuei32.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0xc0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 c0 <unknown>
-
-vamominuei64.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamominuei64.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0xc0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 c0 <unknown>
-
-vamomaxuei8.v x0, (a0), v4, v24
-# CHECK-INST: vamomaxuei8.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0xe2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 e2 <unknown>
-
-vamomaxuei16.v x0, (a0), v4, v24
-# CHECK-INST: vamomaxuei16.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0xe2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 e2 <unknown>
-
-vamomaxuei32.v x0, (a0), v4, v24
-# CHECK-INST: vamomaxuei32.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0xe2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 e2 <unknown>
-
-vamomaxuei64.v x0, (a0), v4, v24
-# CHECK-INST: vamomaxuei64.v x0, (a0), v4, v24
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0xe2]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 e2 <unknown>
-
-vamomaxuei8.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamomaxuei8.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x0c,0x45,0xe0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 0c 45 e0 <unknown>
-
-vamomaxuei16.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamomaxuei16.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x5c,0x45,0xe0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 5c 45 e0 <unknown>
-
-vamomaxuei32.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamomaxuei32.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x6c,0x45,0xe0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 6c 45 e0 <unknown>
-
-vamomaxuei64.v x0, (a0), v4, v24, v0.t
-# CHECK-INST: vamomaxuei64.v x0, (a0), v4, v24, v0.t
-# CHECK-ENCODING: [0x2f,0x7c,0x45,0xe0]
-# CHECK-ERROR: instruction requires the following: 'A' (Atomic Instructions), 'Zvamo' (Vector AMO Operations)
-# CHECK-UNKNOWN: 2f 7c 45 e0 <unknown>

More information about the cfe-commits mailing list