[llvm] [RISCV] Implement cross basic block VXRM write insertion. (PR #70382)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 1 16:50:21 PDT 2023


https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/70382

From a324eefe471137b2a0d362faf18736297d78a975 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 1 Sep 2023 14:22:21 -0700
Subject: [PATCH 1/9] [RISCV] Implement cross basic block VXRM write insertion.

This adds a new pass that inserts VXRM writes for vector instructions,
with the goal of avoiding redundant writes.

The pass runs two dataflow analyses. The first is a forward dataflow
to calculate where a VXRM value is available. The second is a backward
dataflow to determine where a VXRM value is anticipated.

Finally, we use the results of these two analyses to insert VXRM writes
where a value is anticipated but not available.

The pass does not split critical edges, so we aren't always able
to eliminate all redundancy.

The pass only inserts VXRM writes on paths that always require
one.
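
As an informal sketch (not part of the patch itself), the per-block
equations the two analyses solve look like this, where "meet" is the
lattice intersection implemented by VXRMInfo::intersect:

  AvailableIn(B)    = meet over preds P of AvailableOut(P)
  AvailableOut(B)   = VXRMOut(B) if B sets VXRM, else AvailableIn(B)
  AnticipatedOut(B) = meet over succs S of AnticipatedIn(S)
  AnticipatedIn(B)  = VXRMUse(B) if B uses VXRM, else AnticipatedOut(B)

A write is emitted where AnticipatedIn is a static value that some
predecessor does not already make available. For example, a vaadd in a
loop body anticipates its rounding mode all the way back to before the
loop, so a single csrwi is emitted ahead of the loop rather than on
every iteration.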
---
 llvm/lib/Target/RISCV/CMakeLists.txt          |   1 +
 llvm/lib/Target/RISCV/RISCV.h                 |   3 +
 .../Target/RISCV/RISCVInsertReadWriteCSR.cpp  |  19 +-
 .../lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 457 +++++++++++++++++
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |   2 +
 llvm/test/CodeGen/RISCV/O0-pipeline.ll        |   1 +
 llvm/test/CodeGen/RISCV/O3-pipeline.ll        |   1 +
 llvm/test/CodeGen/RISCV/rvv/copyprop.mir      |   2 +-
 llvm/test/CodeGen/RISCV/rvv/masked-tama.ll    |   8 +-
 llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll    |   8 +-
 llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll    |   8 +-
 llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll    |   8 +-
 .../RISCV/rvv/mutate-prior-vsetvli-avl.ll     |  12 +-
 .../RISCV/rvv/rvv-peephole-vmerge-vops.ll     |   2 +-
 llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll    |  22 +-
 llvm/test/CodeGen/RISCV/rvv/vaadd.ll          | 176 +++----
 llvm/test/CodeGen/RISCV/rvv/vaaddu.ll         | 176 +++----
 llvm/test/CodeGen/RISCV/rvv/vasub.ll          | 176 +++----
 llvm/test/CodeGen/RISCV/rvv/vasubu.ll         | 176 +++----
 llvm/test/CodeGen/RISCV/rvv/vnclip.ll         | 180 +++----
 llvm/test/CodeGen/RISCV/rvv/vnclipu.ll        | 180 +++----
 llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll     | 160 +++---
 llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll     | 176 +++----
 llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll     | 176 +++----
 llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll     | 176 +++----
 llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll     | 176 +++----
 llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll     | 176 +++----
 llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll    | 473 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vxrm.mir          |   4 +-
 29 files changed, 2029 insertions(+), 1106 deletions(-)
 create mode 100644 llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll

diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index 4d5fa79389ea68b..5b94ce3bea451a0 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -37,6 +37,7 @@ add_llvm_target(RISCVCodeGen
   RISCVGatherScatterLowering.cpp
   RISCVInsertVSETVLI.cpp
   RISCVInsertReadWriteCSR.cpp
+  RISCVInsertWriteVXRM.cpp
   RISCVInstrInfo.cpp
   RISCVISelDAGToDAG.cpp
   RISCVISelLowering.cpp
diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index 3d8e33dc716ea44..6092d312e4f98b0 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -68,6 +68,9 @@ void initializeRISCVPostRAExpandPseudoPass(PassRegistry &);
 FunctionPass *createRISCVInsertReadWriteCSRPass();
 void initializeRISCVInsertReadWriteCSRPass(PassRegistry &);
 
+FunctionPass *createRISCVInsertWriteVXRMPass();
+void initializeRISCVInsertWriteVXRMPass(PassRegistry &);
+
 FunctionPass *createRISCVRedundantCopyEliminationPass();
 void initializeRISCVRedundantCopyEliminationPass(PassRegistry &);
 
diff --git a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
index 75f5ac3fbe0dd55..acd19bf1b8a162e 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
@@ -9,7 +9,6 @@
 // of the RISC-V instructions.
 //
 // Currently the pass implements:
-// -Naive insertion of a write to vxrm before an RVV fixed-point instruction.
 // -Writing and saving frm before an RVV floating-point instruction with a
 //  static rounding mode and restores the value after.
 //
@@ -58,25 +57,11 @@ char RISCVInsertReadWriteCSR::ID = 0;
 INITIALIZE_PASS(RISCVInsertReadWriteCSR, DEBUG_TYPE,
                 RISCV_INSERT_READ_WRITE_CSR_NAME, false, false)
 
-// This function inserts a write to vxrm when encountering an RVV fixed-point
-// instruction. This function also swaps frm and restores it when encountering
-// an RVV floating point instruction with a static rounding mode.
+// This function swaps frm and restores it when encountering an RVV
+// floating-point instruction with a static rounding mode.
 bool RISCVInsertReadWriteCSR::emitWriteRoundingMode(MachineBasicBlock &MBB) {
   bool Changed = false;
   for (MachineInstr &MI : MBB) {
-    int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
-    if (VXRMIdx >= 0) {
-      unsigned VXRMImm = MI.getOperand(VXRMIdx).getImm();
-
-      Changed = true;
-
-      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
-          .addImm(VXRMImm);
-      MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
-                                              /*IsImp*/ true));
-      continue;
-    }
-
     int FRMIdx = RISCVII::getFRMOpNum(MI.getDesc());
     if (FRMIdx < 0)
       continue;
diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
new file mode 100644
index 000000000000000..8313a809b84cbcb
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -0,0 +1,457 @@
+//===-- RISCVInsertWriteVXRM.cpp - Insert Write of RISC-V VXRM CSR --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass inserts writes to the VXRM CSR as needed by vector instructions.
+// Each instruction that uses VXRM carries an operand that contains its required
+// VXRM value. This pass tries to optimize placement to avoid redundant writes
+// to VXRM.
+//
+// This is done using two dataflow analyses. The first is a forward dataflow
+// to calculate where a VXRM value is available. The second is a backward
+// dataflow to determine where a VXRM value is anticipated.
+//
+// Finally, we use the results of these two analyses to insert VXRM writes
+// where a value is anticipated but not available.
+//
+// FIXME: This pass does not split critical edges, so there can still be some
+// redundancy.
+//
+// FIXME: If we are willing to have writes that aren't always needed, we could
+// reduce the number of VXRM writes in some cases.
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include <queue>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-insert-write-vxrm"
+#define RISCV_INSERT_WRITE_VXRM_NAME "RISC-V Insert Write VXRM Pass"
+
+namespace {
+
+class VXRMInfo {
+  uint8_t VXRMImm = 0;
+
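+  // Note: these states form a three-point lattice. Uninitialized is the
+  // top element (no information yet), a Static immediate is a known VXRM
+  // value, and Unknown is the bottom element that absorbs everything in
+  // intersect() below.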
+  enum : uint8_t {
+    Uninitialized,
+    Static,
+    Unknown,
+  } State = Uninitialized;
+
+public:
+  VXRMInfo() {}
+
+  static VXRMInfo getUnknown() {
+    VXRMInfo Info;
+    Info.setUnknown();
+    return Info;
+  }
+
+  bool isValid() const { return State != Uninitialized; }
+  void setUnknown() { State = Unknown; }
+  bool isUnknown() const { return State == Unknown; }
+
+  bool isStatic() const { return State == Static; }
+
+  void setVXRMImm(unsigned Imm) {
+    assert(Imm <= 3 && "Unexpected VXRM value");
+    VXRMImm = Imm;
+    State = Static;
+  }
+  unsigned getVXRMImm() const {
+    assert(isStatic() && VXRMImm <= 3 && "Unexpected state");
+    return VXRMImm;
+  }
+
+  bool operator==(const VXRMInfo &Other) const {
+    // Uninitialized is only equal to another Uninitialized.
+    if (!isValid())
+      return !Other.isValid();
+    if (!Other.isValid())
+      return !isValid();
+
+    // Unknown is only equal to another Unknown.
+    if (isUnknown())
+      return Other.isUnknown();
+    if (Other.isUnknown())
+      return isUnknown();
+
+    return VXRMImm == Other.VXRMImm;
+  }
+
+  bool operator!=(const VXRMInfo &Other) const { return !(*this == Other); }
+
+  // Calculate the VXRMInfo visible to a block assuming this and Other are
+  // both predecessors.
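+  // For example: Static(1) meet Static(1) is Static(1), Static(1) meet
+  // Static(2) is Unknown, and Uninitialized meet X is X for any X.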
+  VXRMInfo intersect(const VXRMInfo &Other) const {
+    // If the new value isn't valid, ignore it.
+    if (!Other.isValid())
+      return *this;
+
+    // If this value isn't valid, this must be the first predecessor; use it.
+    if (!isValid())
+      return Other;
+
+    // If either is unknown, the result is unknown.
+    if (isUnknown() || Other.isUnknown())
+      return VXRMInfo::getUnknown();
+
+    // If we have an exact match, return this.
+    if (*this == Other)
+      return *this;
+
+    // Otherwise the result is unknown.
+    return VXRMInfo::getUnknown();
+  }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Support for debugging, callable in GDB: V->dump()
+  LLVM_DUMP_METHOD void dump() const {
+    print(dbgs());
+    dbgs() << "\n";
+  }
+
+  void print(raw_ostream &OS) const {
+    OS << '{';
+    if (!isValid())
+      OS << "Uninitialized";
+    else if (isUnknown())
+      OS << "Unknown";
+    else
+      OS << getVXRMImm();
+    OS << '}';
+  }
+#endif
+};
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_ATTRIBUTE_USED
+inline raw_ostream &operator<<(raw_ostream &OS, const VXRMInfo &V) {
+  V.print(OS);
+  return OS;
+}
+#endif
+
+struct BlockData {
+  // Indicates if the block uses VXRM. Uninitialized means no use.
+  VXRMInfo VXRMUse;
+
+  // Indicates the VXRM output from the block. Uninitialized means transparent.
+  VXRMInfo VXRMOut;
+
+  // Keeps track of the available VXRM value at the start of the basic block.
+  VXRMInfo AvailableIn;
+
+  // Keeps track of the available VXRM value at the end of the basic block.
+  VXRMInfo AvailableOut;
+
+  // Keeps track of what VXRM is anticipated at the start of the basic block.
+  VXRMInfo AnticipatedIn;
+
+  // Keeps track of what VXRM is anticipated at the end of the basic block.
+  VXRMInfo AnticipatedOut;
+
+  // Keeps track of whether the block is already in the queue.
+  bool InQueue;
+
+  BlockData() = default;
+};
+
+class RISCVInsertWriteVXRM : public MachineFunctionPass {
+  const TargetInstrInfo *TII;
+
+  std::vector<BlockData> BlockInfo;
+  std::queue<const MachineBasicBlock *> WorkList;
+
+public:
+  static char ID;
+
+  RISCVInsertWriteVXRM() : MachineFunctionPass(ID) {
+    initializeRISCVInsertWriteVXRMPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override {
+    return RISCV_INSERT_WRITE_VXRM_NAME;
+  }
+
+private:
+  bool computeVXRMChanges(const MachineBasicBlock &MBB);
+  void computeAvailable(const MachineBasicBlock &MBB);
+  void computeAnticipated(const MachineBasicBlock &MBB);
+  void emitWriteVXRM(MachineBasicBlock &MBB);
+};
+
+} // end anonymous namespace
+
+char RISCVInsertWriteVXRM::ID = 0;
+
+INITIALIZE_PASS(RISCVInsertWriteVXRM, DEBUG_TYPE, RISCV_INSERT_WRITE_VXRM_NAME,
+                false, false)
+
+bool RISCVInsertWriteVXRM::computeVXRMChanges(const MachineBasicBlock &MBB) {
+  BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+
+  bool NeedVXRMWrite = false;
+  for (const MachineInstr &MI : MBB) {
+    int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
+    if (VXRMIdx >= 0) {
+      unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();
+
+      if (!BBInfo.VXRMUse.isValid())
+        BBInfo.VXRMUse.setVXRMImm(NewVXRMImm);
+
+      BBInfo.VXRMOut.setVXRMImm(NewVXRMImm);
+      NeedVXRMWrite = true;
+    }
+
+    if (MI.isCall() || MI.isInlineAsm()) {
+      if (!BBInfo.VXRMUse.isValid())
+        BBInfo.VXRMUse.setUnknown();
+
+      BBInfo.VXRMOut.setUnknown();
+    }
+  }
+
+  return NeedVXRMWrite;
+}
+
+void RISCVInsertWriteVXRM::computeAvailable(const MachineBasicBlock &MBB) {
+  BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+
+  BBInfo.InQueue = false;
+
+  VXRMInfo Available;
+  if (MBB.pred_empty()) {
+    Available.setUnknown();
+  } else {
+    for (const MachineBasicBlock *P : MBB.predecessors())
+      Available = Available.intersect(BlockInfo[P->getNumber()].AvailableOut);
+  }
+
+  // If we don't have any valid available info, wait until we do.
+  if (!Available.isValid())
+    return;
+
+  if (Available != BBInfo.AvailableIn) {
+    BBInfo.AvailableIn = Available;
+    LLVM_DEBUG(dbgs() << "AvailableIn state of " << printMBBReference(MBB)
+                      << " changed to " << BBInfo.AvailableIn << "\n");
+  }
+
+  if (BBInfo.VXRMOut.isValid())
+    Available = BBInfo.VXRMOut;
+
+  if (Available == BBInfo.AvailableOut)
+    return;
+
+  BBInfo.AvailableOut = Available;
+  LLVM_DEBUG(dbgs() << "AvailableOut state of " << printMBBReference(MBB)
+                    << " changed to " << BBInfo.AvailableOut << "\n");
+
+  // Add the successors to the work list so that we can propagate.
+  for (MachineBasicBlock *S : MBB.successors()) {
+    if (!BlockInfo[S->getNumber()].InQueue) {
+      BlockInfo[S->getNumber()].InQueue = true;
+      WorkList.push(S);
+    }
+  }
+}
+
+void RISCVInsertWriteVXRM::computeAnticipated(const MachineBasicBlock &MBB) {
+  BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+
+  BBInfo.InQueue = false;
+
+  VXRMInfo Anticipated;
+  if (MBB.succ_empty()) {
+    Anticipated.setUnknown();
+  } else {
+    for (const MachineBasicBlock *S : MBB.successors())
+      Anticipated =
+          Anticipated.intersect(BlockInfo[S->getNumber()].AnticipatedIn);
+  }
+
+  // If we don't have any valid anticipated info, wait until we do.
+  if (!Anticipated.isValid())
+    return;
+
+  if (Anticipated != BBInfo.AnticipatedOut) {
+    BBInfo.AnticipatedOut = Anticipated;
+    LLVM_DEBUG(dbgs() << "AnticipatedOut state of " << printMBBReference(MBB)
+                      << " changed to " << BBInfo.AnticipatedOut << "\n");
+  }
+
+  // If this block uses VXRM, anticipate its first required value instead.
+  if (BBInfo.VXRMUse.isValid())
+    Anticipated = BBInfo.VXRMUse;
+
+  if (Anticipated == BBInfo.AnticipatedIn)
+    return;
+
+  BBInfo.AnticipatedIn = Anticipated;
+  LLVM_DEBUG(dbgs() << "AnticipatedIn state of " << printMBBReference(MBB)
+                    << " changed to " << BBInfo.AnticipatedIn << "\n");
+
+  // Add the predecessors to the work list so that we can propagate.
+  for (MachineBasicBlock *P : MBB.predecessors()) {
+    if (!BlockInfo[P->getNumber()].InQueue) {
+      BlockInfo[P->getNumber()].InQueue = true;
+      WorkList.push(P);
+    }
+  }
+}
+
+void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
+  const BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+
+  VXRMInfo Info = BBInfo.AvailableIn;
+
+  // Insert a VXRM write if a value is anticipated but not available.
+  if (BBInfo.AnticipatedIn.isStatic()) {
+    bool NeedInsert = false;
+    // If there are no predecessors and the value is anticipated, insert.
+    if (MBB.pred_empty()) {
+      NeedInsert = true;
+    } else {
+      // Search for any predecessor that wouldn't satisfy our requirement and
+      // insert a VXRM write if needed.
+      // NOTE: If one predecessor is able to provide the requirement, but
+      // another isn't, it means we have a critical edge. The better placement
+      // would be to split the critical edge.
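+      // For example, if predecessor A already makes the anticipated VXRM
+      // value available but predecessor B does not, the write inserted at
+      // the head of this block is redundant when entered from A. Splitting
+      // the edge from B would let the write be placed on that edge alone.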
+      for (MachineBasicBlock *P : MBB.predecessors()) {
+        const BlockData &PInfo = BlockInfo[P->getNumber()];
+        // If it's available out of the predecessor, then we're ok.
+        if (PInfo.AvailableOut.isStatic() &&
+            PInfo.AvailableOut.getVXRMImm() ==
+                BBInfo.AnticipatedIn.getVXRMImm())
+          continue;
+        // If the predecessor anticipated this value for all its successors,
+        // then it should have already inserted.
+        if (PInfo.AnticipatedOut.isStatic() &&
+            PInfo.AnticipatedOut.getVXRMImm() ==
+                BBInfo.AnticipatedIn.getVXRMImm())
+          continue;
+        NeedInsert = true;
+        break;
+      }
+    }
+
+    if (NeedInsert) {
+      BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(),
+              TII->get(RISCV::WriteVXRMImm))
+          .addImm(BBInfo.AnticipatedIn.getVXRMImm());
+      Info.setVXRMImm(BBInfo.AnticipatedIn.getVXRMImm());
+    }
+    Info = BBInfo.AnticipatedIn;
+  }
+
+  for (MachineInstr &MI : MBB) {
+    int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
+    if (VXRMIdx >= 0) {
+      unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();
+
+      if (!Info.isStatic() || Info.getVXRMImm() != NewVXRMImm) {
+        LLVM_DEBUG(dbgs() << "Inserting before "; MI.print(dbgs()));
+        BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
+            .addImm(NewVXRMImm);
+      }
+
+      MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
+                                              /*IsImp*/ true));
+      Info.setVXRMImm(NewVXRMImm);
+      continue;
+    }
+
+    if (MI.getOpcode() == RISCV::WriteVXRMImm) {
+      Info.setVXRMImm(MI.getOperand(0).getImm());
+      continue;
+    }
+
+    if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VXRM))
+      Info.setUnknown();
+  }
+
+  // If all our successors anticipate a value, do the insert.
+  // NOTE: It's possible that not all predecessors of our successor provide the
+  // correct value. This can occur on critical edges. If we don't split the
+  // critical edge, we'll also have a VXRM write in the successor that is
+  // redundant with this one.
+  if (BBInfo.AnticipatedOut.isStatic() &&
+      (!Info.isStatic() ||
+       Info.getVXRMImm() != BBInfo.AnticipatedOut.getVXRMImm())) {
+    LLVM_DEBUG(dbgs() << "Inserting at end of " << printMBBReference(MBB)
+                      << " changing to " << BBInfo.AnticipatedOut << "\n");
+    BuildMI(MBB, MBB.getFirstTerminator(), DebugLoc(),
+            TII->get(RISCV::WriteVXRMImm))
+        .addImm(BBInfo.AnticipatedOut.getVXRMImm());
+  }
+}
+
+bool RISCVInsertWriteVXRM::runOnMachineFunction(MachineFunction &MF) {
+  // Skip if the vector extension is not enabled.
+  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+  if (!ST.hasVInstructions())
+    return false;
+
+  TII = ST.getInstrInfo();
+
+  assert(BlockInfo.empty() && "Expect empty block infos");
+  BlockInfo.resize(MF.getNumBlockIDs());
+
+  // Phase 1 - collect block information.
+  bool NeedVXRMChange = false;
+  for (const MachineBasicBlock &MBB : MF)
+    NeedVXRMChange |= computeVXRMChanges(MBB);
+
+  if (NeedVXRMChange) {
+    // Phase 2 - Compute available VXRM using a forward walk.
+    for (const MachineBasicBlock &MBB : MF) {
+      WorkList.push(&MBB);
+      BlockInfo[MBB.getNumber()].InQueue = true;
+    }
+    while (!WorkList.empty()) {
+      const MachineBasicBlock &MBB = *WorkList.front();
+      WorkList.pop();
+      computeAvailable(MBB);
+    }
+
+    // Phase 3 - Compute anticipated VXRM using a backwards walk.
+    for (const MachineBasicBlock &MBB : llvm::reverse(MF)) {
+      WorkList.push(&MBB);
+      BlockInfo[MBB.getNumber()].InQueue = true;
+    }
+    while (!WorkList.empty()) {
+      const MachineBasicBlock &MBB = *WorkList.front();
+      WorkList.pop();
+      computeAnticipated(MBB);
+    }
+
+    // Phase 4 - Emit VXRM writes at the earliest place possible.
+    for (MachineBasicBlock &MBB : MF)
+      emitWriteVXRM(MBB);
+  }
+
+  BlockInfo.clear();
+
+  return NeedVXRMChange;
+}
+
+FunctionPass *llvm::createRISCVInsertWriteVXRMPass() {
+  return new RISCVInsertWriteVXRM();
+}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 953ac097b915044..137d82505d152db 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -103,6 +103,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   initializeRISCVExpandPseudoPass(*PR);
   initializeRISCVInsertVSETVLIPass(*PR);
   initializeRISCVInsertReadWriteCSRPass(*PR);
+  initializeRISCVInsertWriteVXRMPass(*PR);
   initializeRISCVDAGToDAGISelPass(*PR);
   initializeRISCVInitUndefPass(*PR);
   initializeRISCVMoveMergePass(*PR);
@@ -432,6 +433,7 @@ void RISCVPassConfig::addPreRegAlloc() {
       EnableRISCVDeadRegisterElimination)
     addPass(createRISCVDeadRegisterDefinitionsPass());
   addPass(createRISCVInsertReadWriteCSRPass());
+  addPass(createRISCVInsertWriteVXRMPass());
 }
 
 void RISCVPassConfig::addOptimizedRegAlloc() {
diff --git a/llvm/test/CodeGen/RISCV/O0-pipeline.ll b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
index 1d9af9df2f718f0..e01d2d45263434e 100644
--- a/llvm/test/CodeGen/RISCV/O0-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
@@ -42,6 +42,7 @@
 ; CHECK-NEXT:       RISC-V Pre-RA pseudo instruction expansion pass
 ; CHECK-NEXT:       RISC-V Insert VSETVLI pass
 ; CHECK-NEXT:       RISC-V Insert Read/Write CSR Pass
+; CHECK-NEXT:       RISC-V Insert Write VXRM Pass
 ; CHECK-NEXT:       RISC-V init undef pass
 ; CHECK-NEXT:       Eliminate PHI nodes for register allocation
 ; CHECK-NEXT:       Two-Address instruction pass
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index cf0826096bd41f8..f1d56e9e18e2bae 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -110,6 +110,7 @@
 ; CHECK-NEXT:       RISC-V Insert VSETVLI pass
 ; CHECK-NEXT:       RISC-V Dead register definitions
 ; CHECK-NEXT:       RISC-V Insert Read/Write CSR Pass
+; CHECK-NEXT:       RISC-V Insert Write VXRM Pass
 ; CHECK-NEXT:       Detect Dead Lanes
 ; CHECK-NEXT:       RISC-V init undef pass
 ; CHECK-NEXT:       Process Implicit Definitions
diff --git a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
index eb4c8bfdd67f9a6..7c9a943bd50b662 100644
--- a/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/copyprop.mir
@@ -5,11 +5,11 @@
   define void @foo() {
   ; CHECK-LABEL: foo:
   ; CHECK:       # %bb.0: # %entry
+  ; CHECK-NEXT:    csrwi vxrm, 0
   ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
   ; CHECK-NEXT:    vmsne.vi v0, v8, 0
   ; CHECK-NEXT:    vsll.vi v9, v8, 5
   ; CHECK-NEXT:    vmerge.vim v9, v9, -1, v0
-  ; CHECK-NEXT:    csrwi vxrm, 0
   ; CHECK-NEXT:    vssra.vi v8, v8, 2
   ; CHECK-NEXT:    bgeu a0, zero, .LBB0_3
   ; CHECK-NEXT:  # %bb.1: # %entry
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index 3c9fe78a86671b8..04be737f38eb931 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -516,8 +516,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -543,8 +543,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -570,8 +570,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -597,8 +597,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnclip.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
index b11d86882ebc08b..d5e8a0347c2d1d2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
@@ -489,8 +489,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -515,8 +515,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -541,8 +541,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -567,8 +567,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
index 8e5035e59262b1a..be2fb7336189e0f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
@@ -489,8 +489,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -515,8 +515,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -541,8 +541,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -567,8 +567,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
index cabb0db215fd29b..11084bb033335dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
@@ -489,8 +489,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -515,8 +515,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -541,8 +541,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -567,8 +567,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
index da56e235ecd8ed2..f5a2021a5aeeb06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
@@ -7,19 +7,19 @@
 define dso_local void @test(ptr nocapture noundef %var_99) {
 ; CHECK-LABEL: test:
 ; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrwi vxrm, 0
 ; CHECK-NEXT:    lui a1, %hi(.L__const.test.var_45)
 ; CHECK-NEXT:    addi a1, a1, %lo(.L__const.test.var_45)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    li a1, 1
-; CHECK-NEXT:    vmul.vx v12, v8, a1
 ; CHECK-NEXT:    lui a1, %hi(.L__const.test.var_101)
 ; CHECK-NEXT:    addi a1, a1, %lo(.L__const.test.var_101)
-; CHECK-NEXT:    vle8.v v16, (a1)
-; CHECK-NEXT:    vmv.x.s a1, v12
-; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vle8.v v12, (a1)
+; CHECK-NEXT:    li a1, 1
+; CHECK-NEXT:    vmul.vx v16, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v16
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a1
-; CHECK-NEXT:    vssra.vv v8, v16, v8
+; CHECK-NEXT:    vssra.vv v8, v12, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v8, v0
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index 1ea2b7ef57cf081..5a821f5dec91d14 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -963,8 +963,8 @@ entry:
 define <vscale x 1 x i16> @test_vaaddu(<vscale x 1 x i16> %var_11, i16 zeroext %var_9, <vscale x 1 x i1> %var_5, <vscale x 1 x i16> %var_0) {
 ; CHECK-LABEL: test_vaaddu:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 3, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetivli zero, 3, e16, mf4, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v9, v8, a0, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
index eccd72a14f27089..00f38f19806b72b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -110,8 +110,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.rm.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -133,8 +133,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.rm.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -199,8 +199,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.rm.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vasub.vv v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -222,8 +222,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.rm.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -950,8 +950,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -974,8 +974,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1271,8 +1271,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1301,16 +1301,16 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ; RV32-NEXT:    addi a0, sp, 8
 ; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
 ; RV32-NEXT:    csrwi vxrm, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
 ; RV32-NEXT:    vsmul.vv v8, v9, v10
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
 ; RV64-NEXT:    vsmul.vx v8, v9, a0
 ; RV64-NEXT:    ret
 entry:
@@ -1376,8 +1376,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vssra_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vssra.vv v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -1400,8 +1400,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vssrl_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vssrl.vv v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll
index 82cd4bf162b96dd..096e60b6285ffdf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaadd.ll
@@ -13,8 +13,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -37,8 +37,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -85,8 +85,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -109,8 +109,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -133,8 +133,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -157,8 +157,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -181,8 +181,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -205,8 +205,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -229,8 +229,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -253,8 +253,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -277,8 +277,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -301,8 +301,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -326,8 +326,8 @@ define <vscale x 64 x i8> @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vsca
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -350,8 +350,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -374,8 +374,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -398,8 +398,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -422,8 +422,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -446,8 +446,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -470,8 +470,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -494,8 +494,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -518,8 +518,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -542,8 +542,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -566,8 +566,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -590,8 +590,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -615,8 +615,8 @@ define <vscale x 32 x i16> @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -639,8 +639,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -663,8 +663,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -687,8 +687,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -711,8 +711,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -735,8 +735,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -759,8 +759,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -783,8 +783,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -807,8 +807,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -831,8 +831,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -856,8 +856,8 @@ define <vscale x 16 x i32> @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -880,8 +880,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -904,8 +904,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -928,8 +928,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -952,8 +952,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -976,8 +976,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1000,8 +1000,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1024,8 +1024,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vaadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -1049,8 +1049,8 @@ define <vscale x 8 x i64> @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vaadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1073,8 +1073,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1121,8 +1121,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1145,8 +1145,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1169,8 +1169,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1193,8 +1193,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1217,8 +1217,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1241,8 +1241,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1265,8 +1265,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1289,8 +1289,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1313,8 +1313,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1337,8 +1337,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,8 +1361,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1385,8 +1385,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1409,8 +1409,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1433,8 +1433,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1457,8 +1457,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1481,8 +1481,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1505,8 +1505,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1529,8 +1529,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1553,8 +1553,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1577,8 +1577,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1601,8 +1601,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1625,8 +1625,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1649,8 +1649,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1673,8 +1673,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1697,8 +1697,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1721,8 +1721,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1745,8 +1745,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1769,8 +1769,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1793,8 +1793,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1817,8 +1817,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1841,8 +1841,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1865,8 +1865,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1889,8 +1889,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vaadd.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1913,8 +1913,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaadd_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vaadd.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1950,8 +1950,8 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ;
 ; RV64-LABEL: intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vaadd.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -1987,8 +1987,8 @@ define <vscale x 1 x i64> @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ;
 ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; RV64-NEXT:    vaadd.vx v8, v9, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2024,8 +2024,8 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ;
 ; RV64-LABEL: intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vaadd.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2061,8 +2061,8 @@ define <vscale x 2 x i64> @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ;
 ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; RV64-NEXT:    vaadd.vx v8, v10, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2098,8 +2098,8 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ;
 ; RV64-LABEL: intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vaadd.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2135,8 +2135,8 @@ define <vscale x 4 x i64> @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ;
 ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV64-NEXT:    vaadd.vx v8, v12, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2172,8 +2172,8 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ;
 ; RV64-LABEL: intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    vaadd.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2209,8 +2209,8 @@ define <vscale x 8 x i64> @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ;
 ; RV64-LABEL: intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV64-NEXT:    vaadd.vx v8, v16, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll
index eba87d7061d3dc4..a15a1932360a5cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu.ll
@@ -13,8 +13,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -37,8 +37,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -85,8 +85,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -109,8 +109,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -133,8 +133,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -157,8 +157,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -181,8 +181,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -205,8 +205,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -229,8 +229,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -253,8 +253,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -277,8 +277,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -301,8 +301,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -326,8 +326,8 @@ define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vsc
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -350,8 +350,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -374,8 +374,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -398,8 +398,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -422,8 +422,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -446,8 +446,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -470,8 +470,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -494,8 +494,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -518,8 +518,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -542,8 +542,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -566,8 +566,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -590,8 +590,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -615,8 +615,8 @@ define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -639,8 +639,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -663,8 +663,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -687,8 +687,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -711,8 +711,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -735,8 +735,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -759,8 +759,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -783,8 +783,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -807,8 +807,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -831,8 +831,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -856,8 +856,8 @@ define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -880,8 +880,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -904,8 +904,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -928,8 +928,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -952,8 +952,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -976,8 +976,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1000,8 +1000,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1024,8 +1024,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vaaddu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -1049,8 +1049,8 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vaaddu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1073,8 +1073,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1121,8 +1121,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1145,8 +1145,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1169,8 +1169,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1193,8 +1193,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1217,8 +1217,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1241,8 +1241,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1265,8 +1265,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1289,8 +1289,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1313,8 +1313,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1337,8 +1337,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,8 +1361,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1385,8 +1385,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1409,8 +1409,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1433,8 +1433,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1457,8 +1457,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1481,8 +1481,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1505,8 +1505,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1529,8 +1529,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1553,8 +1553,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1577,8 +1577,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1601,8 +1601,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1625,8 +1625,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1649,8 +1649,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1673,8 +1673,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1697,8 +1697,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1721,8 +1721,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1745,8 +1745,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1769,8 +1769,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1793,8 +1793,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1817,8 +1817,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1841,8 +1841,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1865,8 +1865,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1889,8 +1889,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vaaddu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1913,8 +1913,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vaaddu_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vaaddu.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1950,8 +1950,8 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ;
 ; RV64-LABEL: intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vaaddu.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -1987,8 +1987,8 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ;
 ; RV64-LABEL: intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; RV64-NEXT:    vaaddu.vx v8, v9, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2024,8 +2024,8 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ;
 ; RV64-LABEL: intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vaaddu.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2061,8 +2061,8 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ;
 ; RV64-LABEL: intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; RV64-NEXT:    vaaddu.vx v8, v10, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2098,8 +2098,8 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ;
 ; RV64-LABEL: intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vaaddu.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2135,8 +2135,8 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ;
 ; RV64-LABEL: intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV64-NEXT:    vaaddu.vx v8, v12, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2172,8 +2172,8 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ;
 ; RV64-LABEL: intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    vaaddu.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2209,8 +2209,8 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ;
 ; RV64-LABEL: intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV64-NEXT:    vaaddu.vx v8, v16, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub.ll b/llvm/test/CodeGen/RISCV/rvv/vasub.ll
index d69910efb0ee169..c3cb66f7f230a60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasub.ll
@@ -13,8 +13,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -37,8 +37,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -85,8 +85,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -109,8 +109,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -133,8 +133,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -157,8 +157,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -181,8 +181,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -205,8 +205,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -229,8 +229,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -253,8 +253,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -277,8 +277,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -301,8 +301,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -326,8 +326,8 @@ define <vscale x 64 x i8> @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vsca
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -350,8 +350,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -374,8 +374,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -398,8 +398,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -422,8 +422,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -446,8 +446,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -470,8 +470,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -494,8 +494,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -518,8 +518,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -542,8 +542,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -566,8 +566,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -590,8 +590,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -615,8 +615,8 @@ define <vscale x 32 x i16> @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -639,8 +639,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -663,8 +663,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -687,8 +687,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -711,8 +711,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -735,8 +735,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -759,8 +759,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -783,8 +783,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -807,8 +807,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -831,8 +831,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -856,8 +856,8 @@ define <vscale x 16 x i32> @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -880,8 +880,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -904,8 +904,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -928,8 +928,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -952,8 +952,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -976,8 +976,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1000,8 +1000,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1024,8 +1024,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vasub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -1049,8 +1049,8 @@ define <vscale x 8 x i64> @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vasub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1073,8 +1073,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vasub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1121,8 +1121,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vasub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1145,8 +1145,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1169,8 +1169,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vasub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1193,8 +1193,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1217,8 +1217,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vasub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1241,8 +1241,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1265,8 +1265,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vasub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1289,8 +1289,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1313,8 +1313,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vasub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1337,8 +1337,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,8 +1361,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vasub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1385,8 +1385,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1409,8 +1409,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vasub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1433,8 +1433,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1457,8 +1457,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vasub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1481,8 +1481,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1505,8 +1505,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vasub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1529,8 +1529,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1553,8 +1553,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vasub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1577,8 +1577,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1601,8 +1601,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vasub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1625,8 +1625,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1649,8 +1649,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vasub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1673,8 +1673,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1697,8 +1697,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vasub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1721,8 +1721,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1745,8 +1745,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vasub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1769,8 +1769,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1793,8 +1793,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vasub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1817,8 +1817,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1841,8 +1841,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vasub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1865,8 +1865,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1889,8 +1889,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vasub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vasub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1913,8 +1913,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasub_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vasub.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1950,8 +1950,8 @@ define <vscale x 1 x i64> @intrinsic_vasub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ;
 ; RV64-LABEL: intrinsic_vasub_vx_nxv1i64_nxv1i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vasub.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -1987,8 +1987,8 @@ define <vscale x 1 x i64> @intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
 ;
 ; RV64-LABEL: intrinsic_vasub_mask_vx_nxv1i64_nxv1i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; RV64-NEXT:    vasub.vx v8, v9, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2024,8 +2024,8 @@ define <vscale x 2 x i64> @intrinsic_vasub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ;
 ; RV64-LABEL: intrinsic_vasub_vx_nxv2i64_nxv2i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vasub.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2061,8 +2061,8 @@ define <vscale x 2 x i64> @intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
 ;
 ; RV64-LABEL: intrinsic_vasub_mask_vx_nxv2i64_nxv2i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; RV64-NEXT:    vasub.vx v8, v10, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2098,8 +2098,8 @@ define <vscale x 4 x i64> @intrinsic_vasub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ;
 ; RV64-LABEL: intrinsic_vasub_vx_nxv4i64_nxv4i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vasub.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2135,8 +2135,8 @@ define <vscale x 4 x i64> @intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
 ;
 ; RV64-LABEL: intrinsic_vasub_mask_vx_nxv4i64_nxv4i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV64-NEXT:    vasub.vx v8, v12, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2172,8 +2172,8 @@ define <vscale x 8 x i64> @intrinsic_vasub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ;
 ; RV64-LABEL: intrinsic_vasub_vx_nxv8i64_nxv8i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    vasub.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2209,8 +2209,8 @@ define <vscale x 8 x i64> @intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
 ;
 ; RV64-LABEL: intrinsic_vasub_mask_vx_nxv8i64_nxv8i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV64-NEXT:    vasub.vx v8, v16, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll
index 4228e067199fb45..6d790a9ce0277de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vasubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vasubu.ll
@@ -13,8 +13,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -37,8 +37,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -85,8 +85,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -109,8 +109,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -133,8 +133,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -157,8 +157,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -181,8 +181,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -205,8 +205,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -229,8 +229,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -253,8 +253,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -277,8 +277,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -301,8 +301,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -326,8 +326,8 @@ define <vscale x 64 x i8> @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vsc
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -350,8 +350,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -374,8 +374,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -398,8 +398,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -422,8 +422,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -446,8 +446,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -470,8 +470,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -494,8 +494,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -518,8 +518,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -542,8 +542,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -566,8 +566,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -590,8 +590,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -615,8 +615,8 @@ define <vscale x 32 x i16> @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -639,8 +639,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -663,8 +663,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -687,8 +687,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -711,8 +711,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -735,8 +735,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -759,8 +759,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -783,8 +783,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -807,8 +807,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -831,8 +831,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -856,8 +856,8 @@ define <vscale x 16 x i32> @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -880,8 +880,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -904,8 +904,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -928,8 +928,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -952,8 +952,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -976,8 +976,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1000,8 +1000,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1024,8 +1024,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vasubu.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -1049,8 +1049,8 @@ define <vscale x 8 x i64> @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsc
 ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vasubu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1073,8 +1073,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1121,8 +1121,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1145,8 +1145,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1169,8 +1169,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1193,8 +1193,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1217,8 +1217,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1241,8 +1241,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1265,8 +1265,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1289,8 +1289,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1313,8 +1313,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1337,8 +1337,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1361,8 +1361,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1385,8 +1385,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1409,8 +1409,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1433,8 +1433,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1457,8 +1457,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1481,8 +1481,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1505,8 +1505,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1529,8 +1529,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1553,8 +1553,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1577,8 +1577,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1601,8 +1601,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1625,8 +1625,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1649,8 +1649,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1673,8 +1673,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1697,8 +1697,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1721,8 +1721,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1745,8 +1745,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1769,8 +1769,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1793,8 +1793,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1817,8 +1817,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1841,8 +1841,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1865,8 +1865,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1889,8 +1889,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vasubu.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1913,8 +1913,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vasubu_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vasubu.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1950,8 +1950,8 @@ define <vscale x 1 x i64> @intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
 ;
 ; RV64-LABEL: intrinsic_vasubu_vx_nxv1i64_nxv1i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; RV64-NEXT:    vasubu.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -1987,8 +1987,8 @@ define <vscale x 1 x i64> @intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
 ;
 ; RV64-LABEL: intrinsic_vasubu_mask_vx_nxv1i64_nxv1i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; RV64-NEXT:    vasubu.vx v8, v9, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2024,8 +2024,8 @@ define <vscale x 2 x i64> @intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
 ;
 ; RV64-LABEL: intrinsic_vasubu_vx_nxv2i64_nxv2i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; RV64-NEXT:    vasubu.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2061,8 +2061,8 @@ define <vscale x 2 x i64> @intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
 ;
 ; RV64-LABEL: intrinsic_vasubu_mask_vx_nxv2i64_nxv2i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; RV64-NEXT:    vasubu.vx v8, v10, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2098,8 +2098,8 @@ define <vscale x 4 x i64> @intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
 ;
 ; RV64-LABEL: intrinsic_vasubu_vx_nxv4i64_nxv4i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; RV64-NEXT:    vasubu.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2135,8 +2135,8 @@ define <vscale x 4 x i64> @intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
 ;
 ; RV64-LABEL: intrinsic_vasubu_mask_vx_nxv4i64_nxv4i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; RV64-NEXT:    vasubu.vx v8, v12, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
@@ -2172,8 +2172,8 @@ define <vscale x 8 x i64> @intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
 ;
 ; RV64-LABEL: intrinsic_vasubu_vx_nxv8i64_nxv8i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    vasubu.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
@@ -2209,8 +2209,8 @@ define <vscale x 8 x i64> @intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
 ;
 ; RV64-LABEL: intrinsic_vasubu_mask_vx_nxv8i64_nxv8i64_i64:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV64-NEXT:    csrwi vxrm, 1
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; RV64-NEXT:    vasubu.vx v8, v16, a0, v0.t
 ; RV64-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll
index 54f4c17dd7ed832..8902b1a28f8c1e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll
@@ -13,8 +13,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnclip.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -37,8 +37,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnclip.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -85,8 +85,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -109,8 +109,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnclip.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -133,8 +133,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -157,8 +157,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnclip.wv v11, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v11
 ; CHECK-NEXT:    ret
@@ -182,8 +182,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -206,8 +206,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vnclip.wv v14, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v14
 ; CHECK-NEXT:    ret
@@ -231,8 +231,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -255,8 +255,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vnclip.wv v20, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v20
 ; CHECK-NEXT:    ret
@@ -280,8 +280,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -304,8 +304,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnclip.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -328,8 +328,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -352,8 +352,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnclip.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -376,8 +376,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -400,8 +400,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vnclip.wv v11, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v11
 ; CHECK-NEXT:    ret
@@ -425,8 +425,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -449,8 +449,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vnclip.wv v14, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v14
 ; CHECK-NEXT:    ret
@@ -474,8 +474,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -498,8 +498,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vnclip.wv v20, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v20
 ; CHECK-NEXT:    ret
@@ -523,8 +523,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -547,8 +547,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnclip.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -571,8 +571,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -595,8 +595,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vnclip.wv v11, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v11
 ; CHECK-NEXT:    ret
@@ -620,8 +620,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -644,8 +644,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vnclip.wv v14, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v14
 ; CHECK-NEXT:    ret
@@ -669,8 +669,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -693,8 +693,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vnclip.wv v20, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v20
 ; CHECK-NEXT:    ret
@@ -718,8 +718,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -741,8 +741,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
 define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnclip.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -765,8 +765,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -788,8 +788,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
 define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnclip.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -812,8 +812,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
 define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -835,8 +835,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
 define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnclip.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -859,8 +859,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
 define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -882,8 +882,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
 define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vnclip.wx v10, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -907,8 +907,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
 define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,8 +930,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
 define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vnclip.wx v12, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -955,8 +955,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
 define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -978,8 +978,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
 define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vnclip.wx v16, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1003,8 +1003,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
 define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1026,8 +1026,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
 define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnclip.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1050,8 +1050,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
 define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1073,8 +1073,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
 define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnclip.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
 define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1120,8 +1120,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
 define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vnclip.wx v10, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1145,8 +1145,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
 define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1168,8 +1168,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
 define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vnclip.wx v12, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1193,8 +1193,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
 define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1216,8 +1216,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
 define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vnclip.wx v16, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1241,8 +1241,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
 define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1264,8 +1264,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
 define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnclip.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1288,8 +1288,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
 define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1311,8 +1311,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
 define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vnclip.wx v10, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1336,8 +1336,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
 define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1359,8 +1359,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
 define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vnclip.wx v12, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1384,8 +1384,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
 define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1407,8 +1407,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
 define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vnclip.wx v16, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1432,8 +1432,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
 define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1450,8 +1450,8 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnclip.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1467,8 +1467,8 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1485,8 +1485,8 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnclip.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1502,8 +1502,8 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1520,8 +1520,8 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnclip.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1537,8 +1537,8 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1555,8 +1555,8 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnclip.wi v10, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1573,8 +1573,8 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1591,8 +1591,8 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vnclip.wi v12, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1609,8 +1609,8 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1627,8 +1627,8 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vnclip.wi v16, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1645,8 +1645,8 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1663,8 +1663,8 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnclip.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1680,8 +1680,8 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1698,8 +1698,8 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnclip.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1715,8 +1715,8 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1733,8 +1733,8 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vnclip.wi v10, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1751,8 +1751,8 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1769,8 +1769,8 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vnclip.wi v12, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1787,8 +1787,8 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1805,8 +1805,8 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vnclip.wi v16, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1823,8 +1823,8 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1841,8 +1841,8 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnclip.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1858,8 +1858,8 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1876,8 +1876,8 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vnclip.wi v10, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1894,8 +1894,8 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1912,8 +1912,8 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vnclip.wi v12, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1930,8 +1930,8 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1948,8 +1948,8 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vnclip.wi v16, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1966,8 +1966,8 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll
index 39980504f887006..a1804e7d98a4659 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll
@@ -13,8 +13,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -37,8 +37,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -85,8 +85,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -109,8 +109,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -133,8 +133,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -157,8 +157,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v11, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v11
 ; CHECK-NEXT:    ret
@@ -182,8 +182,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -206,8 +206,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v14, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v14
 ; CHECK-NEXT:    ret
@@ -231,8 +231,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -255,8 +255,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v20, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v20
 ; CHECK-NEXT:    ret
@@ -280,8 +280,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -304,8 +304,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -328,8 +328,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -352,8 +352,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -376,8 +376,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -400,8 +400,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v11, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v11
 ; CHECK-NEXT:    ret
@@ -425,8 +425,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -449,8 +449,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v14, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v14
 ; CHECK-NEXT:    ret
@@ -474,8 +474,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -498,8 +498,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v20, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v20
 ; CHECK-NEXT:    ret
@@ -523,8 +523,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -547,8 +547,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -571,8 +571,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -595,8 +595,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v11, v8, v10
 ; CHECK-NEXT:    vmv.v.v v8, v11
 ; CHECK-NEXT:    ret
@@ -620,8 +620,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -644,8 +644,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v14, v8, v12
 ; CHECK-NEXT:    vmv.v.v v8, v14
 ; CHECK-NEXT:    ret
@@ -669,8 +669,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -693,8 +693,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vnclipu.wv v20, v8, v16
 ; CHECK-NEXT:    vmv.v.v v8, v20
 ; CHECK-NEXT:    ret
@@ -718,8 +718,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -741,8 +741,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
 define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -765,8 +765,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
 define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -788,8 +788,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
 define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -812,8 +812,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
 define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -835,8 +835,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
 define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -859,8 +859,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
 define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -882,8 +882,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
 define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v10, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -907,8 +907,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
 define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,8 +930,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
 define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v12, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -955,8 +955,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
 define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -978,8 +978,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
 define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v16, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1003,8 +1003,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
 define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1026,8 +1026,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
 define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1050,8 +1050,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
 define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1073,8 +1073,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
 define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
 define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1120,8 +1120,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
 define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v10, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1145,8 +1145,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
 define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1168,8 +1168,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
 define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v12, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1193,8 +1193,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
 define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1216,8 +1216,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
 define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v16, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1241,8 +1241,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
 define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1264,8 +1264,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
 define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1288,8 +1288,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
 define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1311,8 +1311,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
 define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v10, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1336,8 +1336,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
 define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1359,8 +1359,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
 define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v12, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1384,8 +1384,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
 define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1407,8 +1407,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
 define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vnclipu.wx v16, v8, a0
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1432,8 +1432,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
 define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1450,8 +1450,8 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1467,8 +1467,8 @@ entry:
 define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1485,8 +1485,8 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1502,8 +1502,8 @@ entry:
 define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1520,8 +1520,8 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1537,8 +1537,8 @@ entry:
 define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1555,8 +1555,8 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v10, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1573,8 +1573,8 @@ entry:
 define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1591,8 +1591,8 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v12, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1609,8 +1609,8 @@ entry:
 define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1627,8 +1627,8 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v16, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1645,8 +1645,8 @@ entry:
 define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1663,8 +1663,8 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1680,8 +1680,8 @@ entry:
 define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1698,8 +1698,8 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1715,8 +1715,8 @@ entry:
 define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1733,8 +1733,8 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v10, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1751,8 +1751,8 @@ entry:
 define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1769,8 +1769,8 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v12, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1787,8 +1787,8 @@ entry:
 define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1805,8 +1805,8 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v16, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1823,8 +1823,8 @@ entry:
 define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1841,8 +1841,8 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v8, v8, 9
 ; CHECK-NEXT:    ret
 entry:
@@ -1858,8 +1858,8 @@ entry:
 define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1876,8 +1876,8 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v10, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -1894,8 +1894,8 @@ entry:
 define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1912,8 +1912,8 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v12, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -1930,8 +1930,8 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1948,8 +1948,8 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vnclipu.wi v16, v8, 9
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
@@ -1966,8 +1966,8 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
index d1fcb0f47cb5ff8..e7d8ae635f75c2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
@@ -15,8 +15,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -39,8 +39,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -63,8 +63,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -87,8 +87,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -111,8 +111,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -135,8 +135,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -159,8 +159,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -183,8 +183,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -207,8 +207,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -231,8 +231,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -255,8 +255,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -279,8 +279,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -303,8 +303,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -328,8 +328,8 @@ define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vsca
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -352,8 +352,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -376,8 +376,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -400,8 +400,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -424,8 +424,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -448,8 +448,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -472,8 +472,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -496,8 +496,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -520,8 +520,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -544,8 +544,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -568,8 +568,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -592,8 +592,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -617,8 +617,8 @@ define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -641,8 +641,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -665,8 +665,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -689,8 +689,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -713,8 +713,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -737,8 +737,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -761,8 +761,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -785,8 +785,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -809,8 +809,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -833,8 +833,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -858,8 +858,8 @@ define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -882,8 +882,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -906,8 +906,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,8 +930,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -954,8 +954,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -978,8 +978,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1002,8 +1002,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1026,8 +1026,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -1051,8 +1051,8 @@ define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1075,8 +1075,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1099,8 +1099,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1123,8 +1123,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1147,8 +1147,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1171,8 +1171,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1195,8 +1195,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1219,8 +1219,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1243,8 +1243,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1267,8 +1267,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1291,8 +1291,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1315,8 +1315,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1339,8 +1339,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1363,8 +1363,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1387,8 +1387,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1411,8 +1411,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1435,8 +1435,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1459,8 +1459,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1483,8 +1483,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1507,8 +1507,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1531,8 +1531,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1555,8 +1555,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1579,8 +1579,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1603,8 +1603,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1627,8 +1627,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1651,8 +1651,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1675,8 +1675,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1698,8 +1698,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1722,8 +1722,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1745,8 +1745,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1769,8 +1769,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1792,8 +1792,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1816,8 +1816,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1839,8 +1839,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1863,8 +1863,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1886,8 +1886,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1910,8 +1910,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
index 1fe1baf1cef265f..66bc5c9103a4827 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
@@ -15,8 +15,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -39,8 +39,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -63,8 +63,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -87,8 +87,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -111,8 +111,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -135,8 +135,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -159,8 +159,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -183,8 +183,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -207,8 +207,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -231,8 +231,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -255,8 +255,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -279,8 +279,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -303,8 +303,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -328,8 +328,8 @@ define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vsca
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -352,8 +352,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -376,8 +376,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -400,8 +400,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -424,8 +424,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -448,8 +448,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -472,8 +472,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -496,8 +496,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -520,8 +520,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -544,8 +544,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -568,8 +568,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -592,8 +592,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -617,8 +617,8 @@ define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -641,8 +641,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -665,8 +665,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -689,8 +689,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -713,8 +713,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -737,8 +737,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -761,8 +761,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -785,8 +785,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -809,8 +809,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -833,8 +833,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -858,8 +858,8 @@ define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -882,8 +882,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -906,8 +906,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -930,8 +930,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -954,8 +954,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -978,8 +978,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -1002,8 +1002,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1026,8 +1026,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
 define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -1051,8 +1051,8 @@ define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vsca
 ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1075,8 +1075,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1099,8 +1099,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
 define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1123,8 +1123,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1147,8 +1147,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
 define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1171,8 +1171,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1195,8 +1195,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
 define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1219,8 +1219,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1243,8 +1243,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
 define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1267,8 +1267,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1291,8 +1291,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
 define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1315,8 +1315,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1339,8 +1339,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
 define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1363,8 +1363,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1387,8 +1387,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
 define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1411,8 +1411,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1435,8 +1435,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
 define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1459,8 +1459,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1483,8 +1483,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
 define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1507,8 +1507,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1531,8 +1531,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
 define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1555,8 +1555,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1579,8 +1579,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
 define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1603,8 +1603,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1627,8 +1627,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
 define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1651,8 +1651,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1675,8 +1675,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
 define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1699,8 +1699,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1723,8 +1723,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
 define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1747,8 +1747,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1771,8 +1771,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
 define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1795,8 +1795,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1819,8 +1819,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
 define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1843,8 +1843,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1867,8 +1867,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
 define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1891,8 +1891,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1915,8 +1915,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
 define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1938,8 +1938,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -1962,8 +1962,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
 define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1985,8 +1985,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2009,8 +2009,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
 define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -2032,8 +2032,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2056,8 +2056,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
 define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v12, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -2079,8 +2079,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vsmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -2103,8 +2103,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
 define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vsmul.vx v8, v16, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
index 8e28dd490a875d0..7fd1b05bb444d15 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll
@@ -5,8 +5,8 @@
 define <vscale x 1 x i8> @test_vssra_vv_i8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -19,8 +19,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>,
 define <vscale x 1 x i8> @test_vssra_vx_i8mf8(<vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -33,8 +33,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i32.i32(<vscale x 1 x i8>, <v
 define <vscale x 2 x i8> @test_vssra_vv_i8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -47,8 +47,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>,
 define <vscale x 2 x i8> @test_vssra_vx_i8mf4(<vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i32.i32(<vscale x 2 x i8>, <v
 define <vscale x 4 x i8> @test_vssra_vv_i8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -75,8 +75,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>,
 define <vscale x 4 x i8> @test_vssra_vx_i8mf2(<vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -89,8 +89,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i32.i32(<vscale x 4 x i8>, <v
 define <vscale x 8 x i8> @test_vssra_vv_i8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -103,8 +103,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>,
 define <vscale x 8 x i8> @test_vssra_vx_i8m1(<vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -117,8 +117,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i32.i32(<vscale x 8 x i8>, <v
 define <vscale x 16 x i8> @test_vssra_vv_i8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -131,8 +131,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i32(<vscale x 16 x
 define <vscale x 16 x i8> @test_vssra_vx_i8m2(<vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -145,8 +145,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i32.i32(<vscale x 16 x i8>,
 define <vscale x 32 x i8> @test_vssra_vv_i8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -159,8 +159,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i32(<vscale x 32 x
 define <vscale x 32 x i8> @test_vssra_vx_i8m4(<vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -173,8 +173,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i32.i32(<vscale x 32 x i8>,
 define <vscale x 64 x i8> @test_vssra_vv_i8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -187,8 +187,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i32(<vscale x 64 x
 define <vscale x 64 x i8> @test_vssra_vx_i8m8(<vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -201,8 +201,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i32.i32(<vscale x 64 x i8>,
 define <vscale x 1 x i16> @test_vssra_vv_i16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -215,8 +215,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i32(<vscale x 1 x i
 define <vscale x 1 x i16> @test_vssra_vx_i16mf4(<vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -229,8 +229,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i32.i32(<vscale x 1 x i16>,
 define <vscale x 2 x i16> @test_vssra_vv_i16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -243,8 +243,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i32(<vscale x 2 x i
 define <vscale x 2 x i16> @test_vssra_vx_i16mf2(<vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -257,8 +257,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i32.i32(<vscale x 2 x i16>,
 define <vscale x 4 x i16> @test_vssra_vv_i16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -271,8 +271,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i32(<vscale x 4 x i
 define <vscale x 4 x i16> @test_vssra_vx_i16m1(<vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -285,8 +285,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i32.i32(<vscale x 4 x i16>,
 define <vscale x 8 x i16> @test_vssra_vv_i16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -299,8 +299,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i32(<vscale x 8 x i
 define <vscale x 8 x i16> @test_vssra_vx_i16m2(<vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -313,8 +313,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i32.i32(<vscale x 8 x i16>,
 define <vscale x 16 x i16> @test_vssra_vv_i16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -327,8 +327,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i32(<vscale x 16
 define <vscale x 16 x i16> @test_vssra_vx_i16m4(<vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -341,8 +341,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i32.i32(<vscale x 16 x i1
 define <vscale x 32 x i16> @test_vssra_vv_i16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -355,8 +355,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i32(<vscale x 32
 define <vscale x 32 x i16> @test_vssra_vx_i16m8(<vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -369,8 +369,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i32.i32(<vscale x 32 x i1
 define <vscale x 1 x i32> @test_vssra_vv_i32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -383,8 +383,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i32(<vscale x 1 x i
 define <vscale x 1 x i32> @test_vssra_vx_i32mf2(<vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -397,8 +397,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32.i32(<vscale x 1 x i32>,
 define <vscale x 2 x i32> @test_vssra_vv_i32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -411,8 +411,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i32(<vscale x 2 x i
 define <vscale x 2 x i32> @test_vssra_vx_i32m1(<vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -425,8 +425,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32.i32(<vscale x 2 x i32>,
 define <vscale x 4 x i32> @test_vssra_vv_i32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -439,8 +439,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i32(<vscale x 4 x i
 define <vscale x 4 x i32> @test_vssra_vx_i32m2(<vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -453,8 +453,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32.i32(<vscale x 4 x i32>,
 define <vscale x 8 x i32> @test_vssra_vv_i32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -467,8 +467,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i32(<vscale x 8 x i
 define <vscale x 8 x i32> @test_vssra_vx_i32m4(<vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -481,8 +481,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32.i32(<vscale x 8 x i32>,
 define <vscale x 16 x i32> @test_vssra_vv_i32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -495,8 +495,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i32(<vscale x 16
 define <vscale x 16 x i32> @test_vssra_vx_i32m8(<vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -509,8 +509,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32.i32(<vscale x 16 x i3
 define <vscale x 1 x i64> @test_vssra_vv_i64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -523,8 +523,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i32(<vscale x 1 x i
 define <vscale x 1 x i64> @test_vssra_vx_i64m1(<vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -537,8 +537,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i32.i32(<vscale x 1 x i64>,
 define <vscale x 2 x i64> @test_vssra_vv_i64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -551,8 +551,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i32(<vscale x 2 x i
 define <vscale x 2 x i64> @test_vssra_vx_i64m2(<vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -565,8 +565,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i32.i32(<vscale x 2 x i64>,
 define <vscale x 4 x i64> @test_vssra_vv_i64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -579,8 +579,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i32(<vscale x 4 x i
 define <vscale x 4 x i64> @test_vssra_vx_i64m4(<vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -593,8 +593,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i32.i32(<vscale x 4 x i64>,
 define <vscale x 8 x i64> @test_vssra_vv_i64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -607,8 +607,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i32(<vscale x 8 x i
 define <vscale x 8 x i64> @test_vssra_vx_i64m8(<vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -621,8 +621,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i32.i32(<vscale x 8 x i64>,
 define <vscale x 1 x i8> @test_vssra_vv_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -635,8 +635,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x
 define <vscale x 1 x i8> @test_vssra_vx_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -649,8 +649,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i32.i32(<vscale x 1 x i8
 define <vscale x 2 x i8> @test_vssra_vv_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -663,8 +663,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x
 define <vscale x 2 x i8> @test_vssra_vx_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -677,8 +677,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i32.i32(<vscale x 2 x i8
 define <vscale x 4 x i8> @test_vssra_vv_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -691,8 +691,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x
 define <vscale x 4 x i8> @test_vssra_vx_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -705,8 +705,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i32.i32(<vscale x 4 x i8
 define <vscale x 8 x i8> @test_vssra_vv_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -719,8 +719,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x
 define <vscale x 8 x i8> @test_vssra_vx_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -733,8 +733,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i32.i32(<vscale x 8 x i8
 define <vscale x 16 x i8> @test_vssra_vv_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -747,8 +747,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32(<vscale x
 define <vscale x 16 x i8> @test_vssra_vx_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -761,8 +761,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i32.i32(<vscale x 16 x
 define <vscale x 32 x i8> @test_vssra_vv_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -775,8 +775,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32(<vscale x
 define <vscale x 32 x i8> @test_vssra_vx_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -789,8 +789,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i32.i32(<vscale x 32 x
 define <vscale x 64 x i8> @test_vssra_vv_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -803,8 +803,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32(<vscale x
 define <vscale x 64 x i8> @test_vssra_vx_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -817,8 +817,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i32.i32(<vscale x 64 x
 define <vscale x 1 x i16> @test_vssra_vv_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -831,8 +831,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32(<vscale x
 define <vscale x 1 x i16> @test_vssra_vx_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -845,8 +845,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i32.i32(<vscale x 1 x
 define <vscale x 2 x i16> @test_vssra_vv_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -859,8 +859,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32(<vscale x
 define <vscale x 2 x i16> @test_vssra_vx_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -873,8 +873,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i32.i32(<vscale x 2 x
 define <vscale x 4 x i16> @test_vssra_vv_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -887,8 +887,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32(<vscale x
 define <vscale x 4 x i16> @test_vssra_vx_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -901,8 +901,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i32.i32(<vscale x 4 x
 define <vscale x 8 x i16> @test_vssra_vv_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -915,8 +915,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32(<vscale x
 define <vscale x 8 x i16> @test_vssra_vx_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -929,8 +929,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i32.i32(<vscale x 8 x
 define <vscale x 16 x i16> @test_vssra_vv_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -943,8 +943,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32(<vscale
 define <vscale x 16 x i16> @test_vssra_vx_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -957,8 +957,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i32.i32(<vscale x 16
 define <vscale x 32 x i16> @test_vssra_vv_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -971,8 +971,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32(<vscale
 define <vscale x 32 x i16> @test_vssra_vx_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -985,8 +985,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i32.i32(<vscale x 32
 define <vscale x 1 x i32> @test_vssra_vv_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -999,8 +999,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32(<vscale x
 define <vscale x 1 x i32> @test_vssra_vx_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1013,8 +1013,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32.i32(<vscale x 1 x
 define <vscale x 2 x i32> @test_vssra_vv_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1027,8 +1027,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32(<vscale x
 define <vscale x 2 x i32> @test_vssra_vx_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1041,8 +1041,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32.i32(<vscale x 2 x
 define <vscale x 4 x i32> @test_vssra_vv_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1055,8 +1055,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32(<vscale x
 define <vscale x 4 x i32> @test_vssra_vx_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1069,8 +1069,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32.i32(<vscale x 4 x
 define <vscale x 8 x i32> @test_vssra_vv_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1083,8 +1083,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32(<vscale x
 define <vscale x 8 x i32> @test_vssra_vx_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32.i32(<vscale x 8 x
 define <vscale x 16 x i32> @test_vssra_vv_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1111,8 +1111,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32(<vscale
 define <vscale x 16 x i32> @test_vssra_vx_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1125,8 +1125,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32.i32(<vscale x 16
 define <vscale x 1 x i64> @test_vssra_vv_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1139,8 +1139,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32(<vscale x
 define <vscale x 1 x i64> @test_vssra_vx_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1153,8 +1153,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i32.i32(<vscale x 1 x
 define <vscale x 2 x i64> @test_vssra_vv_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1167,8 +1167,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32(<vscale x
 define <vscale x 2 x i64> @test_vssra_vx_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1181,8 +1181,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i32.i32(<vscale x 2 x
 define <vscale x 4 x i64> @test_vssra_vv_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1195,8 +1195,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32(<vscale x
 define <vscale x 4 x i64> @test_vssra_vx_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1209,8 +1209,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i32.i32(<vscale x 4 x
 define <vscale x 8 x i64> @test_vssra_vv_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1223,8 +1223,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32(<vscale x
 define <vscale x 8 x i64> @test_vssra_vx_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
index 96ca5e32cf36b55..b7a84e58e6e612c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll
@@ -5,8 +5,8 @@
 define <vscale x 1 x i8> @test_vssra_vv_i8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -19,8 +19,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>,
 define <vscale x 1 x i8> @test_vssra_vx_i8mf8(<vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -33,8 +33,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8>, <v
 define <vscale x 2 x i8> @test_vssra_vv_i8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -47,8 +47,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>,
 define <vscale x 2 x i8> @test_vssra_vx_i8mf4(<vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8>, <v
 define <vscale x 4 x i8> @test_vssra_vv_i8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -75,8 +75,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>,
 define <vscale x 4 x i8> @test_vssra_vx_i8mf2(<vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -89,8 +89,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8>, <v
 define <vscale x 8 x i8> @test_vssra_vv_i8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -103,8 +103,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>,
 define <vscale x 8 x i8> @test_vssra_vx_i8m1(<vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -117,8 +117,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8>, <v
 define <vscale x 16 x i8> @test_vssra_vv_i8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -131,8 +131,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x
 define <vscale x 16 x i8> @test_vssra_vx_i8m2(<vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -145,8 +145,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8>,
 define <vscale x 32 x i8> @test_vssra_vv_i8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -159,8 +159,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x
 define <vscale x 32 x i8> @test_vssra_vx_i8m4(<vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -173,8 +173,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8>,
 define <vscale x 64 x i8> @test_vssra_vv_i8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -187,8 +187,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x
 define <vscale x 64 x i8> @test_vssra_vx_i8m8(<vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -201,8 +201,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8>,
 define <vscale x 1 x i16> @test_vssra_vv_i16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -215,8 +215,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i
 define <vscale x 1 x i16> @test_vssra_vx_i16mf4(<vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -229,8 +229,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16>,
 define <vscale x 2 x i16> @test_vssra_vv_i16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -243,8 +243,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i
 define <vscale x 2 x i16> @test_vssra_vx_i16mf2(<vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -257,8 +257,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16>,
 define <vscale x 4 x i16> @test_vssra_vv_i16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -271,8 +271,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i
 define <vscale x 4 x i16> @test_vssra_vx_i16m1(<vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -285,8 +285,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16>,
 define <vscale x 8 x i16> @test_vssra_vv_i16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -299,8 +299,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i
 define <vscale x 8 x i16> @test_vssra_vx_i16m2(<vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -313,8 +313,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16>,
 define <vscale x 16 x i16> @test_vssra_vv_i16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -327,8 +327,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16
 define <vscale x 16 x i16> @test_vssra_vx_i16m4(<vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -341,8 +341,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i1
 define <vscale x 32 x i16> @test_vssra_vv_i16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -355,8 +355,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32
 define <vscale x 32 x i16> @test_vssra_vx_i16m8(<vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -369,8 +369,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i1
 define <vscale x 1 x i32> @test_vssra_vv_i32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -383,8 +383,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i
 define <vscale x 1 x i32> @test_vssra_vx_i32mf2(<vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -397,8 +397,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32>,
 define <vscale x 2 x i32> @test_vssra_vv_i32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -411,8 +411,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i
 define <vscale x 2 x i32> @test_vssra_vx_i32m1(<vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -425,8 +425,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32>,
 define <vscale x 4 x i32> @test_vssra_vv_i32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -439,8 +439,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i
 define <vscale x 4 x i32> @test_vssra_vx_i32m2(<vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -453,8 +453,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32>,
 define <vscale x 8 x i32> @test_vssra_vv_i32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -467,8 +467,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i
 define <vscale x 8 x i32> @test_vssra_vx_i32m4(<vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -481,8 +481,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32>,
 define <vscale x 16 x i32> @test_vssra_vv_i32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -495,8 +495,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16
 define <vscale x 16 x i32> @test_vssra_vx_i32m8(<vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -509,8 +509,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i3
 define <vscale x 1 x i64> @test_vssra_vv_i64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -523,8 +523,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i
 define <vscale x 1 x i64> @test_vssra_vx_i64m1(<vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -537,8 +537,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64>,
 define <vscale x 2 x i64> @test_vssra_vv_i64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -551,8 +551,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i
 define <vscale x 2 x i64> @test_vssra_vx_i64m2(<vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -565,8 +565,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64>,
 define <vscale x 4 x i64> @test_vssra_vv_i64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -579,8 +579,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i
 define <vscale x 4 x i64> @test_vssra_vx_i64m4(<vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -593,8 +593,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64>,
 define <vscale x 8 x i64> @test_vssra_vv_i64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -607,8 +607,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i
 define <vscale x 8 x i64> @test_vssra_vx_i64m8(<vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -621,8 +621,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64>,
 define <vscale x 1 x i8> @test_vssra_vv_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -635,8 +635,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x
 define <vscale x 1 x i8> @test_vssra_vx_i8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -649,8 +649,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8
 define <vscale x 2 x i8> @test_vssra_vv_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -663,8 +663,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x
 define <vscale x 2 x i8> @test_vssra_vx_i8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -677,8 +677,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8
 define <vscale x 4 x i8> @test_vssra_vv_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -691,8 +691,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x
 define <vscale x 4 x i8> @test_vssra_vx_i8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -705,8 +705,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8
 define <vscale x 8 x i8> @test_vssra_vv_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -719,8 +719,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x
 define <vscale x 8 x i8> @test_vssra_vx_i8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -733,8 +733,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8
 define <vscale x 16 x i8> @test_vssra_vv_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -747,8 +747,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x
 define <vscale x 16 x i8> @test_vssra_vx_i8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -761,8 +761,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x
 define <vscale x 32 x i8> @test_vssra_vv_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -775,8 +775,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x
 define <vscale x 32 x i8> @test_vssra_vx_i8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -789,8 +789,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x
 define <vscale x 64 x i8> @test_vssra_vv_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i8m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -803,8 +803,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x
 define <vscale x 64 x i8> @test_vssra_vx_i8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i8m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -817,8 +817,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x
 define <vscale x 1 x i16> @test_vssra_vv_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -831,8 +831,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x
 define <vscale x 1 x i16> @test_vssra_vx_i16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -845,8 +845,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x
 define <vscale x 2 x i16> @test_vssra_vv_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -859,8 +859,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x
 define <vscale x 2 x i16> @test_vssra_vx_i16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -873,8 +873,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x
 define <vscale x 4 x i16> @test_vssra_vv_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -887,8 +887,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x
 define <vscale x 4 x i16> @test_vssra_vx_i16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -901,8 +901,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x
 define <vscale x 8 x i16> @test_vssra_vv_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -915,8 +915,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x
 define <vscale x 8 x i16> @test_vssra_vx_i16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -929,8 +929,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x
 define <vscale x 16 x i16> @test_vssra_vv_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -943,8 +943,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale
 define <vscale x 16 x i16> @test_vssra_vx_i16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -957,8 +957,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16
 define <vscale x 32 x i16> @test_vssra_vv_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i16m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -971,8 +971,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale
 define <vscale x 32 x i16> @test_vssra_vx_i16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i16m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -985,8 +985,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32
 define <vscale x 1 x i32> @test_vssra_vv_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -999,8 +999,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x
 define <vscale x 1 x i32> @test_vssra_vx_i32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1013,8 +1013,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x
 define <vscale x 2 x i32> @test_vssra_vv_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1027,8 +1027,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x
 define <vscale x 2 x i32> @test_vssra_vx_i32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1041,8 +1041,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x
 define <vscale x 4 x i32> @test_vssra_vv_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1055,8 +1055,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x
 define <vscale x 4 x i32> @test_vssra_vx_i32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1069,8 +1069,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x
 define <vscale x 8 x i32> @test_vssra_vv_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1083,8 +1083,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x
 define <vscale x 8 x i32> @test_vssra_vx_i32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x
 define <vscale x 16 x i32> @test_vssra_vv_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i32m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1111,8 +1111,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale
 define <vscale x 16 x i32> @test_vssra_vx_i32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i32m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1125,8 +1125,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16
 define <vscale x 1 x i64> @test_vssra_vv_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1139,8 +1139,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x
 define <vscale x 1 x i64> @test_vssra_vx_i64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1153,8 +1153,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x
 define <vscale x 2 x i64> @test_vssra_vv_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1167,8 +1167,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x
 define <vscale x 2 x i64> @test_vssra_vx_i64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1181,8 +1181,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x
 define <vscale x 4 x i64> @test_vssra_vv_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1195,8 +1195,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x
 define <vscale x 4 x i64> @test_vssra_vx_i64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1209,8 +1209,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x
 define <vscale x 8 x i64> @test_vssra_vv_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vv_i64m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssra.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1223,8 +1223,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x
 define <vscale x 8 x i64> @test_vssra_vx_i64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssra_vx_i64m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vssra.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
index c1a064984dcc161..0c2cdff65776e3f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll
@@ -5,8 +5,8 @@
 define <vscale x 1 x i8> @test_vssrl_vv_u8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -19,8 +19,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>,
 define <vscale x 1 x i8> @test_vssrl_vx_u8mf8(<vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -33,8 +33,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i32.i32(<vscale x 1 x i8>, <v
 define <vscale x 2 x i8> @test_vssrl_vv_u8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -47,8 +47,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>,
 define <vscale x 2 x i8> @test_vssrl_vx_u8mf4(<vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i32.i32(<vscale x 2 x i8>, <v
 define <vscale x 4 x i8> @test_vssrl_vv_u8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -75,8 +75,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>,
 define <vscale x 4 x i8> @test_vssrl_vx_u8mf2(<vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -89,8 +89,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i32.i32(<vscale x 4 x i8>, <v
 define <vscale x 8 x i8> @test_vssrl_vv_u8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -103,8 +103,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>,
 define <vscale x 8 x i8> @test_vssrl_vx_u8m1(<vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -117,8 +117,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i32.i32(<vscale x 8 x i8>, <v
 define <vscale x 16 x i8> @test_vssrl_vv_u8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -131,8 +131,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(<vscale x 16 x
 define <vscale x 16 x i8> @test_vssrl_vx_u8m2(<vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -145,8 +145,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i32.i32(<vscale x 16 x i8>,
 define <vscale x 32 x i8> @test_vssrl_vv_u8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -159,8 +159,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(<vscale x 32 x
 define <vscale x 32 x i8> @test_vssrl_vx_u8m4(<vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -173,8 +173,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i32.i32(<vscale x 32 x i8>,
 define <vscale x 64 x i8> @test_vssrl_vv_u8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -187,8 +187,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(<vscale x 64 x
 define <vscale x 64 x i8> @test_vssrl_vx_u8m8(<vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -201,8 +201,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i32.i32(<vscale x 64 x i8>,
 define <vscale x 1 x i16> @test_vssrl_vv_u16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -215,8 +215,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i
 define <vscale x 1 x i16> @test_vssrl_vx_u16mf4(<vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -229,8 +229,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i32.i32(<vscale x 1 x i16>,
 define <vscale x 2 x i16> @test_vssrl_vv_u16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -243,8 +243,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i
 define <vscale x 2 x i16> @test_vssrl_vx_u16mf2(<vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -257,8 +257,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i32.i32(<vscale x 2 x i16>,
 define <vscale x 4 x i16> @test_vssrl_vv_u16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -271,8 +271,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i
 define <vscale x 4 x i16> @test_vssrl_vx_u16m1(<vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -285,8 +285,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i32.i32(<vscale x 4 x i16>,
 define <vscale x 8 x i16> @test_vssrl_vv_u16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -299,8 +299,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i
 define <vscale x 8 x i16> @test_vssrl_vx_u16m2(<vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -313,8 +313,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i32.i32(<vscale x 8 x i16>,
 define <vscale x 16 x i16> @test_vssrl_vv_u16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -327,8 +327,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(<vscale x 16
 define <vscale x 16 x i16> @test_vssrl_vx_u16m4(<vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -341,8 +341,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i32.i32(<vscale x 16 x i1
 define <vscale x 32 x i16> @test_vssrl_vv_u16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -355,8 +355,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(<vscale x 32
 define <vscale x 32 x i16> @test_vssrl_vx_u16m8(<vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -369,8 +369,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i32.i32(<vscale x 32 x i1
 define <vscale x 1 x i32> @test_vssrl_vv_u32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -383,8 +383,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i
 define <vscale x 1 x i32> @test_vssrl_vx_u32mf2(<vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -397,8 +397,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32.i32(<vscale x 1 x i32>,
 define <vscale x 2 x i32> @test_vssrl_vv_u32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -411,8 +411,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i
 define <vscale x 2 x i32> @test_vssrl_vx_u32m1(<vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -425,8 +425,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32.i32(<vscale x 2 x i32>,
 define <vscale x 4 x i32> @test_vssrl_vv_u32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -439,8 +439,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i
 define <vscale x 4 x i32> @test_vssrl_vx_u32m2(<vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -453,8 +453,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32.i32(<vscale x 4 x i32>,
 define <vscale x 8 x i32> @test_vssrl_vv_u32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -467,8 +467,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i
 define <vscale x 8 x i32> @test_vssrl_vx_u32m4(<vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -481,8 +481,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32.i32(<vscale x 8 x i32>,
 define <vscale x 16 x i32> @test_vssrl_vv_u32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -495,8 +495,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(<vscale x 16
 define <vscale x 16 x i32> @test_vssrl_vx_u32m8(<vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -509,8 +509,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32.i32(<vscale x 16 x i3
 define <vscale x 1 x i64> @test_vssrl_vv_u64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -523,8 +523,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i
 define <vscale x 1 x i64> @test_vssrl_vx_u64m1(<vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -537,8 +537,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i32.i32(<vscale x 1 x i64>,
 define <vscale x 2 x i64> @test_vssrl_vv_u64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -551,8 +551,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i
 define <vscale x 2 x i64> @test_vssrl_vx_u64m2(<vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -565,8 +565,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i32.i32(<vscale x 2 x i64>,
 define <vscale x 4 x i64> @test_vssrl_vv_u64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -579,8 +579,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i
 define <vscale x 4 x i64> @test_vssrl_vx_u64m4(<vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -593,8 +593,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i32.i32(<vscale x 4 x i64>,
 define <vscale x 8 x i64> @test_vssrl_vv_u64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -607,8 +607,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i
 define <vscale x 8 x i64> @test_vssrl_vx_u64m8(<vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -621,8 +621,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i32.i32(<vscale x 8 x i64>,
 define <vscale x 1 x i8> @test_vssrl_vv_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -635,8 +635,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x
 define <vscale x 1 x i8> @test_vssrl_vx_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -649,8 +649,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8
 define <vscale x 2 x i8> @test_vssrl_vv_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -663,8 +663,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x
 define <vscale x 2 x i8> @test_vssrl_vx_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -677,8 +677,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8
 define <vscale x 4 x i8> @test_vssrl_vv_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -691,8 +691,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x
 define <vscale x 4 x i8> @test_vssrl_vx_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -705,8 +705,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8
 define <vscale x 8 x i8> @test_vssrl_vv_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -719,8 +719,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x
 define <vscale x 8 x i8> @test_vssrl_vx_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -733,8 +733,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8
 define <vscale x 16 x i8> @test_vssrl_vv_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -747,8 +747,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(<vscale x
 define <vscale x 16 x i8> @test_vssrl_vx_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -761,8 +761,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(<vscale x 16 x
 define <vscale x 32 x i8> @test_vssrl_vv_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -775,8 +775,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(<vscale x
 define <vscale x 32 x i8> @test_vssrl_vx_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -789,8 +789,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(<vscale x 32 x
 define <vscale x 64 x i8> @test_vssrl_vv_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -803,8 +803,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(<vscale x
 define <vscale x 64 x i8> @test_vssrl_vx_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -817,8 +817,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(<vscale x 64 x
 define <vscale x 1 x i16> @test_vssrl_vv_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -831,8 +831,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(<vscale x
 define <vscale x 1 x i16> @test_vssrl_vx_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -845,8 +845,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(<vscale x 1 x
 define <vscale x 2 x i16> @test_vssrl_vv_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -859,8 +859,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(<vscale x
 define <vscale x 2 x i16> @test_vssrl_vx_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -873,8 +873,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(<vscale x 2 x
 define <vscale x 4 x i16> @test_vssrl_vv_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -887,8 +887,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(<vscale x
 define <vscale x 4 x i16> @test_vssrl_vx_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -901,8 +901,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(<vscale x 4 x
 define <vscale x 8 x i16> @test_vssrl_vv_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -915,8 +915,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(<vscale x
 define <vscale x 8 x i16> @test_vssrl_vx_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -929,8 +929,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(<vscale x 8 x
 define <vscale x 16 x i16> @test_vssrl_vv_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -943,8 +943,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(<vscale
 define <vscale x 16 x i16> @test_vssrl_vx_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -957,8 +957,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(<vscale x 16
 define <vscale x 32 x i16> @test_vssrl_vv_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -971,8 +971,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(<vscale
 define <vscale x 32 x i16> @test_vssrl_vx_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -985,8 +985,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(<vscale x 32
 define <vscale x 1 x i32> @test_vssrl_vv_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -999,8 +999,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(<vscale x
 define <vscale x 1 x i32> @test_vssrl_vx_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1013,8 +1013,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(<vscale x 1 x
 define <vscale x 2 x i32> @test_vssrl_vv_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1027,8 +1027,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(<vscale x
 define <vscale x 2 x i32> @test_vssrl_vx_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1041,8 +1041,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(<vscale x 2 x
 define <vscale x 4 x i32> @test_vssrl_vv_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1055,8 +1055,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(<vscale x
 define <vscale x 4 x i32> @test_vssrl_vx_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1069,8 +1069,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32.i32(<vscale x 4 x
 define <vscale x 8 x i32> @test_vssrl_vv_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1083,8 +1083,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(<vscale x
 define <vscale x 8 x i32> @test_vssrl_vx_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(<vscale x 8 x
 define <vscale x 16 x i32> @test_vssrl_vv_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1111,8 +1111,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(<vscale
 define <vscale x 16 x i32> @test_vssrl_vx_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1125,8 +1125,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(<vscale x 16
 define <vscale x 1 x i64> @test_vssrl_vv_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1139,8 +1139,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(<vscale x
 define <vscale x 1 x i64> @test_vssrl_vx_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1153,8 +1153,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(<vscale x 1 x
 define <vscale x 2 x i64> @test_vssrl_vv_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1167,8 +1167,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(<vscale x
 define <vscale x 2 x i64> @test_vssrl_vx_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1181,8 +1181,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(<vscale x 2 x
 define <vscale x 4 x i64> @test_vssrl_vv_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1195,8 +1195,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(<vscale x
 define <vscale x 4 x i64> @test_vssrl_vx_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1209,8 +1209,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(<vscale x 4 x
 define <vscale x 8 x i64> @test_vssrl_vv_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1223,8 +1223,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(<vscale x
 define <vscale x 8 x i64> @test_vssrl_vx_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i32 %shift, i32 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
index 0a465db64b7acdb..fe80854bb264681 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll
@@ -5,8 +5,8 @@
 define <vscale x 1 x i8> @test_vssrl_vv_u8mf8(<vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -19,8 +19,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>,
 define <vscale x 1 x i8> @test_vssrl_vx_u8mf8(<vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -33,8 +33,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8>, <v
 define <vscale x 2 x i8> @test_vssrl_vv_u8mf4(<vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -47,8 +47,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>,
 define <vscale x 2 x i8> @test_vssrl_vx_u8mf4(<vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -61,8 +61,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8>, <v
 define <vscale x 4 x i8> @test_vssrl_vv_u8mf2(<vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -75,8 +75,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>,
 define <vscale x 4 x i8> @test_vssrl_vx_u8mf2(<vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -89,8 +89,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8>, <v
 define <vscale x 8 x i8> @test_vssrl_vv_u8m1(<vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -103,8 +103,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>,
 define <vscale x 8 x i8> @test_vssrl_vx_u8m1(<vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -117,8 +117,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8>, <v
 define <vscale x 16 x i8> @test_vssrl_vv_u8m2(<vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -131,8 +131,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x
 define <vscale x 16 x i8> @test_vssrl_vx_u8m2(<vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -145,8 +145,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8>,
 define <vscale x 32 x i8> @test_vssrl_vv_u8m4(<vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -159,8 +159,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x
 define <vscale x 32 x i8> @test_vssrl_vx_u8m4(<vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -173,8 +173,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8>,
 define <vscale x 64 x i8> @test_vssrl_vv_u8m8(<vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -187,8 +187,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x
 define <vscale x 64 x i8> @test_vssrl_vx_u8m8(<vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -201,8 +201,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8>,
 define <vscale x 1 x i16> @test_vssrl_vv_u16mf4(<vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -215,8 +215,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i
 define <vscale x 1 x i16> @test_vssrl_vx_u16mf4(<vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16mf4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -229,8 +229,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16>,
 define <vscale x 2 x i16> @test_vssrl_vv_u16mf2(<vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -243,8 +243,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i
 define <vscale x 2 x i16> @test_vssrl_vx_u16mf2(<vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -257,8 +257,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16>,
 define <vscale x 4 x i16> @test_vssrl_vv_u16m1(<vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -271,8 +271,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i
 define <vscale x 4 x i16> @test_vssrl_vx_u16m1(<vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -285,8 +285,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16>,
 define <vscale x 8 x i16> @test_vssrl_vv_u16m2(<vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -299,8 +299,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i
 define <vscale x 8 x i16> @test_vssrl_vx_u16m2(<vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -313,8 +313,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16>,
 define <vscale x 16 x i16> @test_vssrl_vv_u16m4(<vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -327,8 +327,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16
 define <vscale x 16 x i16> @test_vssrl_vx_u16m4(<vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -341,8 +341,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i1
 define <vscale x 32 x i16> @test_vssrl_vv_u16m8(<vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -355,8 +355,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32
 define <vscale x 32 x i16> @test_vssrl_vx_u16m8(<vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -369,8 +369,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i1
 define <vscale x 1 x i32> @test_vssrl_vv_u32mf2(<vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -383,8 +383,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i
 define <vscale x 1 x i32> @test_vssrl_vx_u32mf2(<vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -397,8 +397,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32>,
 define <vscale x 2 x i32> @test_vssrl_vv_u32m1(<vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -411,8 +411,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i
 define <vscale x 2 x i32> @test_vssrl_vx_u32m1(<vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -425,8 +425,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32>,
 define <vscale x 4 x i32> @test_vssrl_vv_u32m2(<vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -439,8 +439,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i
 define <vscale x 4 x i32> @test_vssrl_vx_u32m2(<vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -453,8 +453,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32>,
 define <vscale x 8 x i32> @test_vssrl_vv_u32m4(<vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -467,8 +467,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i
 define <vscale x 8 x i32> @test_vssrl_vx_u32m4(<vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -481,8 +481,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32>,
 define <vscale x 16 x i32> @test_vssrl_vv_u32m8(<vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -495,8 +495,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16
 define <vscale x 16 x i32> @test_vssrl_vx_u32m8(<vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -509,8 +509,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i3
 define <vscale x 1 x i64> @test_vssrl_vv_u64m1(<vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -523,8 +523,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i
 define <vscale x 1 x i64> @test_vssrl_vx_u64m1(<vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -537,8 +537,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64>,
 define <vscale x 2 x i64> @test_vssrl_vv_u64m2(<vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -551,8 +551,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i
 define <vscale x 2 x i64> @test_vssrl_vx_u64m2(<vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -565,8 +565,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64>,
 define <vscale x 4 x i64> @test_vssrl_vv_u64m4(<vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -579,8 +579,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i
 define <vscale x 4 x i64> @test_vssrl_vx_u64m4(<vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -593,8 +593,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64>,
 define <vscale x 8 x i64> @test_vssrl_vv_u64m8(<vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -607,8 +607,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i
 define <vscale x 8 x i64> @test_vssrl_vx_u64m8(<vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -621,8 +621,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64>,
 define <vscale x 1 x i8> @test_vssrl_vv_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, <vscale x 1 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -635,8 +635,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x
 define <vscale x 1 x i8> @test_vssrl_vx_u8mf8_m(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -649,8 +649,8 @@ declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8
 define <vscale x 2 x i8> @test_vssrl_vv_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -663,8 +663,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x
 define <vscale x 2 x i8> @test_vssrl_vx_u8mf4_m(<vscale x 2 x i1> %mask, <vscale x 2 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -677,8 +677,8 @@ declare <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8
 define <vscale x 4 x i8> @test_vssrl_vv_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, <vscale x 4 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -691,8 +691,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x
 define <vscale x 4 x i8> @test_vssrl_vx_u8mf2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -705,8 +705,8 @@ declare <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8
 define <vscale x 8 x i8> @test_vssrl_vv_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, <vscale x 8 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -719,8 +719,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x
 define <vscale x 8 x i8> @test_vssrl_vx_u8m1_m(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -733,8 +733,8 @@ declare <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8
 define <vscale x 16 x i8> @test_vssrl_vv_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, <vscale x 16 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -747,8 +747,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x
 define <vscale x 16 x i8> @test_vssrl_vx_u8m2_m(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -761,8 +761,8 @@ declare <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x
 define <vscale x 32 x i8> @test_vssrl_vv_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, <vscale x 32 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -775,8 +775,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x
 define <vscale x 32 x i8> @test_vssrl_vx_u8m4_m(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -789,8 +789,8 @@ declare <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x
 define <vscale x 64 x i8> @test_vssrl_vv_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, <vscale x 64 x i8> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u8m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -803,8 +803,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x
 define <vscale x 64 x i8> @test_vssrl_vx_u8m8_m(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u8m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -817,8 +817,8 @@ declare <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x
 define <vscale x 1 x i16> @test_vssrl_vv_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, <vscale x 1 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -831,8 +831,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x
 define <vscale x 1 x i16> @test_vssrl_vx_u16mf4_m(<vscale x 1 x i1> %mask, <vscale x 1 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16mf4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -845,8 +845,8 @@ declare <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x
 define <vscale x 2 x i16> @test_vssrl_vv_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -859,8 +859,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x
 define <vscale x 2 x i16> @test_vssrl_vx_u16mf2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -873,8 +873,8 @@ declare <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x
 define <vscale x 4 x i16> @test_vssrl_vv_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -887,8 +887,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x
 define <vscale x 4 x i16> @test_vssrl_vx_u16m1_m(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -901,8 +901,8 @@ declare <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x
 define <vscale x 8 x i16> @test_vssrl_vv_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, <vscale x 8 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -915,8 +915,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x
 define <vscale x 8 x i16> @test_vssrl_vx_u16m2_m(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -929,8 +929,8 @@ declare <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x
 define <vscale x 16 x i16> @test_vssrl_vv_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, <vscale x 16 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -943,8 +943,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale
 define <vscale x 16 x i16> @test_vssrl_vx_u16m4_m(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -957,8 +957,8 @@ declare <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16
 define <vscale x 32 x i16> @test_vssrl_vv_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, <vscale x 32 x i16> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u16m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -971,8 +971,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale
 define <vscale x 32 x i16> @test_vssrl_vx_u16m8_m(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u16m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -985,8 +985,8 @@ declare <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32
 define <vscale x 1 x i32> @test_vssrl_vv_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, <vscale x 1 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -999,8 +999,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x
 define <vscale x 1 x i32> @test_vssrl_vx_u32mf2_m(<vscale x 1 x i1> %mask, <vscale x 1 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32mf2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1013,8 +1013,8 @@ declare <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x
 define <vscale x 2 x i32> @test_vssrl_vv_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, <vscale x 2 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1027,8 +1027,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x
 define <vscale x 2 x i32> @test_vssrl_vx_u32m1_m(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1041,8 +1041,8 @@ declare <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x
 define <vscale x 4 x i32> @test_vssrl_vv_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, <vscale x 4 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1055,8 +1055,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x
 define <vscale x 4 x i32> @test_vssrl_vx_u32m2_m(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1069,8 +1069,8 @@ declare <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x
 define <vscale x 8 x i32> @test_vssrl_vv_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, <vscale x 8 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1083,8 +1083,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x
 define <vscale x 8 x i32> @test_vssrl_vx_u32m4_m(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1097,8 +1097,8 @@ declare <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x
 define <vscale x 16 x i32> @test_vssrl_vv_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, <vscale x 16 x i32> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u32m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1111,8 +1111,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale
 define <vscale x 16 x i32> @test_vssrl_vx_u32m8_m(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u32m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1125,8 +1125,8 @@ declare <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16
 define <vscale x 1 x i64> @test_vssrl_vv_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, <vscale x 1 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1139,8 +1139,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x
 define <vscale x 1 x i64> @test_vssrl_vx_u64m1_m(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m1_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1153,8 +1153,8 @@ declare <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x
 define <vscale x 2 x i64> @test_vssrl_vv_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, <vscale x 2 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1167,8 +1167,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x
 define <vscale x 2 x i64> @test_vssrl_vx_u64m2_m(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m2_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1181,8 +1181,8 @@ declare <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x
 define <vscale x 4 x i64> @test_vssrl_vv_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, <vscale x 4 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1195,8 +1195,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x
 define <vscale x 4 x i64> @test_vssrl_vx_u64m4_m(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m4_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1209,8 +1209,8 @@ declare <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x
 define <vscale x 8 x i64> @test_vssrl_vv_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, <vscale x 8 x i64> %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vv_u64m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -1223,8 +1223,8 @@ declare <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x
 define <vscale x 8 x i64> @test_vssrl_vx_u64m8_m(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %op1, i64 %shift, i64 %vl) {
 ; CHECK-LABEL: test_vssrl_vx_u64m8_m:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vssrl.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
new file mode 100644
index 000000000000000..23909b0e849622a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
@@ -0,0 +1,473 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen, iXLen);
+declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen, iXLen);
+
+; Test same rounding mode in one block.
+define <vscale x 1 x i8> @test1(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: test1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %b
+}
+
+; Test different rounding mode.
+define <vscale x 1 x i8> @test2(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: test2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 2, iXLen %3)
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %b
+}
+
+declare <vscale x 1 x i8> @foo(<vscale x 1 x i8>)
+
+; Test same vxrm with call in between which may invalidate vxrm.
+define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: test3:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 1
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    mv s0, a0
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    csrwi vxrm, 0
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; RV32-NEXT:    vaadd.vv v8, v8, v9
+; RV32-NEXT:    call foo@plt
+; RV32-NEXT:    vsetvli zero, s0, e8, mf8, ta, ma
+; RV32-NEXT:    csrwi vxrm, 0
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vaadd.vv v8, v8, v9
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test3:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 1
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    mv s0, a0
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; RV64-NEXT:    vaadd.vv v8, v8, v9
+; RV64-NEXT:    call foo@plt
+; RV64-NEXT:    vsetvli zero, s0, e8, mf8, ta, ma
+; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    vaadd.vv v8, v8, v9
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 1
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  %b = call <vscale x 1 x i8> @foo(<vscale x 1 x i8> %a)
+  %c = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %c
+}
+
+; Test same vxrm with asm in between which may invalidate vxrm.
+define <vscale x 1 x i8> @test4(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: test4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  %b = call <vscale x 1 x i8> asm "", "=^vr,0"(<vscale x 1 x i8> %a)
+  %c = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %c
+}
+
+; Test same rounding mode in triangle.
+define <vscale x 1 x i8> @test5(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test5:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    beqz a1, .LBB4_2
+; CHECK-NEXT:  # %bb.1: # %condblock
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:  .LBB4_2: # %mergeblock
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br i1 %cond, label %condblock, label %mergeblock
+
+condblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %c = phi <vscale x 1 x i8> [%a, %entry], [%b, %condblock]
+
+  ret <vscale x 1 x i8> %c
+}
+
+; Test same rounding mode in diamond with no dominating vxrm.
+define <vscale x 1 x i8> @test6(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test6:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    beqz a1, .LBB5_2
+; CHECK-NEXT:  # %bb.1: # %trueblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB5_2: # %falseblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  br i1 %cond, label %trueblock, label %falseblock
+
+trueblock:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+falseblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %c = phi <vscale x 1 x i8> [%a, %trueblock], [%b, %falseblock]
+
+  ret <vscale x 1 x i8> %c
+}
+
+; Test same rounding mode in diamond with same dominating vxrm.
+define <vscale x 1 x i8> @test7(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    beqz a1, .LBB6_2
+; CHECK-NEXT:  # %bb.1: # %trueblock
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB6_2: # %falseblock
+; CHECK-NEXT:    vasub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br i1 %cond, label %trueblock, label %falseblock
+
+trueblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+falseblock:
+  %c = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %d = phi <vscale x 1 x i8> [%b, %trueblock], [%c, %falseblock]
+
+  ret <vscale x 1 x i8> %d
+}
+
+; Test same rounding mode in diamond with same vxrm at merge.
+define <vscale x 1 x i8> @test8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    beqz a1, .LBB7_2
+; CHECK-NEXT:  # %bb.1: # %trueblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB7_2: # %falseblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vasub.vv v8, v8, v9
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  br i1 %cond, label %trueblock, label %falseblock
+
+trueblock:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+falseblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %c = phi <vscale x 1 x i8> [%a, %trueblock], [%b, %falseblock]
+  %d = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %c,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %d
+}
+
+; Test same rounding mode in diamond with different vxrm at merge.
+define <vscale x 1 x i8> @test9(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test9:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    beqz a1, .LBB8_2
+; CHECK-NEXT:  # %bb.1: # %trueblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    j .LBB8_3
+; CHECK-NEXT:  .LBB8_2: # %falseblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vasub.vv v8, v8, v9
+; CHECK-NEXT:  .LBB8_3: # %mergeblock
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  br i1 %cond, label %trueblock, label %falseblock
+
+trueblock:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+falseblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %c = phi <vscale x 1 x i8> [%a, %trueblock], [%b, %falseblock]
+  %d = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %c,
+    <vscale x 1 x i8> %2,
+    iXLen 2, iXLen %3)
+
+  ret <vscale x 1 x i8> %d
+}
+
+; Test loop with no dominating vxrm write.
+define void @test10(i8* nocapture %ptr_dest, i8* nocapture readonly %ptr_op1, i8* nocapture readonly %ptr_op2, iXLen %n) {
+; CHECK-LABEL: test10:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    beqz a3, .LBB9_3
+; CHECK-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:  .LBB9_2: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vsetvli a4, a3, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a2)
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    sub a3, a3, a4
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    bnez a3, .LBB9_2
+; CHECK-NEXT:  .LBB9_3: # %for.end
+; CHECK-NEXT:    ret
+entry:
+  %tobool.not9 = icmp eq iXLen %n, 0
+  br i1 %tobool.not9, label %for.end, label %for.body
+
+for.body:
+  %n.addr.011 = phi iXLen [ %n, %entry ], [ %sub, %for.body ]
+  %vl = tail call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %n.addr.011, iXLen 0, iXLen 5)
+  %load1 = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op1, iXLen %vl)
+  %load2 = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op2, iXLen %vl)
+  %vadd = tail call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %load1, <vscale x 1 x i8> %load2, iXLen 2, iXLen %vl)
+  tail call void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8> %vadd, ptr %ptr_dest, iXLen %vl)
+  %sub = sub iXLen %n.addr.011, %vl
+  %tobool.not = icmp eq iXLen %sub, 0
+  br i1 %tobool.not, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
+
+declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen immarg, iXLen immarg)
+declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8>, <vscale x 1 x i8>* nocapture, iXLen)
+declare void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8>, <vscale x 1 x i8>* nocapture, iXLen)
+
+; Test loop with dominating vxrm write. Make sure there is no write in the loop.
+define void @test11(i8* nocapture %ptr_dest, i8* nocapture readonly %ptr_op1, i8* nocapture readonly %ptr_op2, iXLen %n) {
+; CHECK-LABEL: test11:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a4, a3, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a2)
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:  .LBB10_1: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    sub a3, a3, a4
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    beqz a3, .LBB10_3
+; CHECK-NEXT:  # %bb.2: # %for.body
+; CHECK-NEXT:    # in Loop: Header=BB10_1 Depth=1
+; CHECK-NEXT:    vsetvli a4, a3, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a2)
+; CHECK-NEXT:    j .LBB10_1
+; CHECK-NEXT:  .LBB10_3: # %for.end
+; CHECK-NEXT:    ret
+entry:
+  %vl = tail call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %n, iXLen 0, iXLen 5)
+  %load1a = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op1, iXLen %vl)
+  %load2a = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op2, iXLen %vl)
+  %vadda = tail call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %load1a, <vscale x 1 x i8> %load2a, iXLen 2, iXLen %vl)
+  tail call void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8> %vadda, ptr %ptr_dest, iXLen %vl)
+  %suba = sub iXLen %n, %vl
+  %tobool.not9 = icmp eq iXLen %suba, 0
+  br i1 %tobool.not9, label %for.end, label %for.body
+
+for.body:
+  %n.addr.011 = phi iXLen [ %suba, %entry ], [ %sub, %for.body ]
+  %vl2 = tail call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %n.addr.011, iXLen 0, iXLen 5)
+  %load1 = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op1, iXLen %vl2)
+  %load2 = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op2, iXLen %vl2)
+  %vadd = tail call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %load1, <vscale x 1 x i8> %load2, iXLen 2, iXLen %vl2)
+  tail call void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8> %vadd, ptr %ptr_dest, iXLen %vl2)
+  %sub = sub iXLen %n.addr.011, %vl2
+  %tobool.not = icmp eq iXLen %sub, 0
+  br i1 %tobool.not, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm.mir b/llvm/test/CodeGen/RISCV/rvv/vxrm.mir
index 64e191887e092c4..ae63e664de13c73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm.mir
@@ -11,14 +11,14 @@ body:     |
     ; MIR-LABEL: name: verify_vxrm
     ; MIR: liveins: $v8, $v9, $x10
     ; MIR-NEXT: {{  $}}
-    ; MIR-NEXT: dead $x0 = PseudoVSETVLI renamable $x10, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; MIR-NEXT: WriteVXRMImm 0, implicit-def $vxrm
+    ; MIR-NEXT: dead $x0 = PseudoVSETVLI renamable $x10, 197 /* e8, mf8, ta, ma */, implicit-def $vl, implicit-def $vtype
     ; MIR-NEXT: renamable $v8 = PseudoVAADD_VV_MF8 undef $v8, renamable $v8, renamable $v9, 0, $noreg, 3 /* e8 */, 0  /* tu, mu */, implicit $vl, implicit $vtype, implicit $vxrm
     ; MIR-NEXT: PseudoRET implicit $v8
     ; ASM-LABEL: verify_vxrm:
     ; ASM:        # %bb.0:
-    ; ASM-NEXT:    vsetvli	zero, a0, e8, mf8, ta, ma
     ; ASM-NEXT:    csrwi	vxrm, 0
+    ; ASM-NEXT:    vsetvli	zero, a0, e8, mf8, ta, ma
     ; ASM-NEXT:    vaadd.vv	v8, v8, v9
     ; ASM-NEXT:    ret
     %0:vr = COPY $v8

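The MIR and asm checks above pin down the placement the pass computes from two dataflow facts per block: whether a VXRM value is available on entry (a forward meet over predecessors) and whether one is anticipated on exit (a backward meet over successors). Below is a minimal standalone C++ sketch of the forward half only; the Block type, the integer state encoding, and the round-robin fixed-point driver are illustrative assumptions rather than the pass's actual data structures, and clobbers from calls or inline asm are ignored.

  #include <vector>

  // States: -2 = uninitialized (top of the lattice), -1 = unknown,
  // 0..3 = a known vxrm immediate.
  struct Block {
    std::vector<int> Preds;
    int GenVXRM = -1; // last vxrm immediate written in the block, if any
  };

  static int meet(int A, int B) {
    if (A == -2) return B;
    if (B == -2) return A;
    return A == B ? A : -1;
  }

  // Forward "available" dataflow: a value is available on entry only if
  // every predecessor makes the same value available on exit. Starting
  // optimistically at top lets a dominating write survive a loop.
  static std::vector<int> computeAvailable(const std::vector<Block> &CFG) {
    std::vector<int> Out(CFG.size(), -2);
    for (bool Changed = true; Changed;) {
      Changed = false;
      for (size_t I = 0; I < CFG.size(); ++I) {
        int In = CFG[I].Preds.empty() ? -1 : -2; // entry: nothing available
        for (int P : CFG[I].Preds)
          In = meet(In, Out[P]);
        int NewOut = CFG[I].GenVXRM >= 0 ? CFG[I].GenVXRM : In;
        if (NewOut != Out[I]) {
          Out[I] = NewOut;
          Changed = true;
        }
      }
    }
    return Out;
  }

  int main() {
    // Shape of test11 above: entry writes vxrm=2, the loop body writes
    // nothing, so 2 stays available around the back edge.
    std::vector<Block> CFG(3);
    CFG[0].GenVXRM = 2;    // entry
    CFG[1].Preds = {0, 1}; // for.body, with a self loop
    CFG[2].Preds = {0, 1}; // for.end
    return computeAvailable(CFG)[1] == 2 ? 0 : 1; // no write in the loop
  }
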
>From 834e97f5bb793d6a66ed4c29f105848d9b7a6788 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 30 Oct 2023 10:53:05 -0700
Subject: [PATCH 2/9] Address code review comments.

---
 llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index 8313a809b84cbcb..d3f0b3604ce3194 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -325,7 +325,7 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
   // Insert VXRM write if anticipated and not available.
   if (BBInfo.AnticipatedIn.isStatic()) {
     bool NeedInsert = false;
-    // If there no predecessors and the value is anticipated, insert.
+    // If there are no predecessors and the value is anticipated, insert.
     if (MBB.pred_empty()) {
       NeedInsert = true;
     } else {
@@ -353,6 +353,9 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
     }
 
     if (NeedInsert) {
+      LLVM_DEBUG(dbgs() << "Inserting at beginning of "
+                        << printMBBReference(MBB) << " changing to "
+                        << BBInfo.AnticipatedIn << "\n");
       BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(),
               TII->get(RISCV::WriteVXRMImm))
           .addImm(BBInfo.AnticipatedIn.getVXRMImm());

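For anyone reproducing this, the new message only prints when LLVM_DEBUG output is compiled in (non-NDEBUG builds) and the pass's debug type is selected. The exact DEBUG_TYPE string is not visible in this hunk, so the name below is an assumption based on the pass's file name:

  llc -mtriple=riscv64 -mattr=+v -debug-only=riscv-insert-write-vxrm \
      vxrm-insert.ll -o /dev/null

With that, insertions report the target block and the value being set, e.g. a line of the form "Inserting at beginning of %bb.1 changing to ...".
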
>From c2c0b7c1898ad75253ab63238a982dd166c38c7a Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 30 Oct 2023 11:32:14 -0700
Subject: [PATCH 3/9] Add some critical edge tests.

---
 llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll | 76 ++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
index 23909b0e849622a..e7ae4a4ac3c6376 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
@@ -471,3 +471,79 @@ for.body:
 for.end:
   ret void
 }
+
+; The edge from entry to block2 is a critical edge. The vxrm write in block2
+; is redundant when coming from block1, but is needed when coming from entry.
+; FIXME: We could remove the write from the end of block1 without splitting the
+; critical edge.
+define <vscale x 1 x i8> @test12(i1 %c1, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %vl) {
+; CHECK-LABEL: test12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v9, v8, v9
+; CHECK-NEXT:    beqz a0, .LBB11_2
+; CHECK-NEXT:  # %bb.1: # %block1
+; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vaadd.vv v9, v8, v9
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:  .LBB11_2: # %block2
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen 0, iXLen %vl)
+  br i1 %c1, label %block1, label %block2
+
+block1:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %a, iXLen 1, iXLen %vl)
+  br label %block2
+
+block2:
+  %c = phi <vscale x 1 x i8> [ %a, %entry ], [ %b, %block1]
+  %d = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %c, iXLen 2, iXLen %vl)
+  ret <vscale x 1 x i8> %d
+}
+
+; Similar to test12, but introduces a second critical edge from block1 to
+; block3. Now the write to vxrm at the end of block1 can't be removed because
+; it is needed by block3.
+define <vscale x 1 x i8> @test13(i1 %c1, i1 %c2, i1 %c3, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %vl) {
+; CHECK-LABEL: test13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli zero, a3, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v10, v8, v9
+; CHECK-NEXT:    beqz a0, .LBB12_2
+; CHECK-NEXT:  # %bb.1: # %block1
+; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vaadd.vv v10, v8, v10
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    beqz a1, .LBB12_3
+; CHECK-NEXT:  .LBB12_2: # %block2
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB12_3: # %block3
+; CHECK-NEXT:    vaadd.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen 0, iXLen %vl)
+  br i1 %c1, label %block1, label %block2
+
+block1:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %a, iXLen 1, iXLen %vl)
+  br i1 %c2, label %block2, label %block3
+
+block2:
+  %c = phi <vscale x 1 x i8> [ %a, %entry ], [ %b, %block1]
+  %d = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %c, iXLen 2, iXLen %vl)
+  ret <vscale x 1 x i8> %d
+
+block3:
+  %e = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %1, <vscale x 1 x i8> %b, iXLen 2, iXLen %vl)
+  ret <vscale x 1 x i8> %e
+}

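Both tests above hinge on the standard definition of a critical edge: one whose source has multiple successors and whose destination has multiple predecessors, so no compensating write can be placed on the edge without creating a new block. A small standalone check of that property, using an assumed adjacency-list CFG in place of MachineBasicBlock:

  #include <vector>

  struct Node {
    std::vector<int> Preds, Succs;
  };

  // An edge From->To is critical iff From branches to multiple blocks and
  // To merges multiple predecessors; splitting it needs a fresh block,
  // which this pass deliberately avoids.
  static bool isCriticalEdge(const std::vector<Node> &CFG, int From, int To) {
    return CFG[From].Succs.size() > 1 && CFG[To].Preds.size() > 1;
  }

  int main() {
    // CFG of test12 above: entry -> {block1, block2}, block1 -> block2.
    std::vector<Node> CFG(3);
    CFG[0].Succs = {1, 2};
    CFG[1].Preds = {0};
    CFG[1].Succs = {2};
    CFG[2].Preds = {0, 1};
    // entry -> block2 is critical; entry -> block1 is not.
    return (isCriticalEdge(CFG, 0, 2) && !isCriticalEdge(CFG, 0, 1)) ? 0 : 1;
  }
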
>From 1dbc87ccce2b8d86a92acb3d5fb0605009c68de1 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 30 Oct 2023 20:03:14 -0700
Subject: [PATCH 4/9] Address review comment

---
 llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index d3f0b3604ce3194..f729e72863af0d8 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -74,18 +74,14 @@ class VXRMInfo {
 
   bool operator==(const VXRMInfo &Other) const {
     // Uninitialized is only equal to another Uninitialized.
-    if (!isValid())
-      return !Other.isValid();
-    if (!Other.isValid())
-      return !isValid();
+    if (State != Other.State)
+      return false;
 
-    // Unknown is only equal to another Unknown.
-    if (isUnknown())
-      return Other.isUnknown();
-    if (Other.isUnknown())
-      return isUnknown();
+    if (isStatic())
+      return VXRMImm == Other.VXRMImm;
 
-    return VXRMImm == Other.VXRMImm;
+    assert((isValid() || isUnknown()) && "Unexpected state");
+    return true;
   }
 
   bool operator!=(const VXRMInfo &Other) const { return !(*this == Other); }

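The collapsed operator== works because the discriminant is compared first: once the two State tags match, only the Static case carries a payload (the immediate), so the Uninitialized and Unknown cases need no further test. A standalone mock of that shape; the enumerator names are assumptions inferred from the predicates in the hunk, not copied from the pass:

  #include <cstdint>

  class VXRMInfo {
    enum : uint8_t { Uninitialized, Static, Unknown } State = Uninitialized;
    uint8_t VXRMImm = 0;

  public:
    bool isValid() const { return State != Uninitialized; }
    bool isUnknown() const { return State == Unknown; }
    bool isStatic() const { return State == Static; }
    void setUnknown() { State = Unknown; }
    void setVXRMImm(uint8_t Imm) {
      State = Static;
      VXRMImm = Imm;
    }

    bool operator==(const VXRMInfo &Other) const {
      if (State != Other.State)
        return false;
      // Only Static carries a payload beyond the tag.
      return !isStatic() || VXRMImm == Other.VXRMImm;
    }
  };

  int main() {
    VXRMInfo A, B;
    bool Ok = (A == B);  // both Uninitialized
    A.setVXRMImm(2);
    Ok &= !(A == B);     // Static(2) vs Uninitialized: tags differ
    B.setVXRMImm(2);
    Ok &= (A == B);      // same Static immediate
    B.setUnknown();
    Ok &= !(A == B);     // Static vs Unknown
    return Ok ? 0 : 1;
  }
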
>From 71c9f569fd5d25b17edf1d83c789f096bd62fd7a Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 1 Nov 2023 15:33:22 -0700
Subject: [PATCH 5/9] Use early out.

---
 .../lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 53 ++++++++++---------
 1 file changed, 28 insertions(+), 25 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index f729e72863af0d8..08ae913de124867 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -418,37 +418,40 @@ bool RISCVInsertWriteVXRM::runOnMachineFunction(MachineFunction &MF) {
   for (const MachineBasicBlock &MBB : MF)
     NeedVXRMChange |= computeVXRMChanges(MBB);
 
-  if (NeedVXRMChange) {
-    // Phase 2 - Compute available VXRM using a forward walk.
-    for (const MachineBasicBlock &MBB : MF) {
-      WorkList.push(&MBB);
-      BlockInfo[MBB.getNumber()].InQueue = true;
-    }
-    while (!WorkList.empty()) {
-      const MachineBasicBlock &MBB = *WorkList.front();
-      WorkList.pop();
-      computeAvailable(MBB);
-    }
+  if (!NeedVXRMChange) {
+    BlockInfo.clear();
+    return false;
+  }
 
-    // Phase 3 - Compute anticipated VXRM using a backwards walk.
-    for (const MachineBasicBlock &MBB : llvm::reverse(MF)) {
-      WorkList.push(&MBB);
-      BlockInfo[MBB.getNumber()].InQueue = true;
-    }
-    while (!WorkList.empty()) {
-      const MachineBasicBlock &MBB = *WorkList.front();
-      WorkList.pop();
-      computeAnticipated(MBB);
-    }
+  // Phase 2 - Compute available VXRM using a forward walk.
+  for (const MachineBasicBlock &MBB : MF) {
+    WorkList.push(&MBB);
+    BlockInfo[MBB.getNumber()].InQueue = true;
+  }
+  while (!WorkList.empty()) {
+    const MachineBasicBlock &MBB = *WorkList.front();
+    WorkList.pop();
+    computeAvailable(MBB);
+  }
 
-    // Phase 4 - Emit VXRM writes at the earliest place possible.
-    for (MachineBasicBlock &MBB : MF)
-      emitWriteVXRM(MBB);
+  // Phase 3 - Compute anticipated VXRM using a backwards walk.
+  for (const MachineBasicBlock &MBB : llvm::reverse(MF)) {
+    WorkList.push(&MBB);
+    BlockInfo[MBB.getNumber()].InQueue = true;
   }
+  while (!WorkList.empty()) {
+    const MachineBasicBlock &MBB = *WorkList.front();
+    WorkList.pop();
+    computeAnticipated(MBB);
+  }
+
+  // Phase 4 - Emit VXRM writes at the earliest place possible.
+  for (MachineBasicBlock &MBB : MF)
+    emitWriteVXRM(MBB);
 
   BlockInfo.clear();
 
-  return NeedVXRMChange;
+  return true;
 }
 
 FunctionPass *llvm::createRISCVInsertWriteVXRMPass() {

>From b7af9ca4e388d08b7fd6a5f07a0356c45a567938 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 1 Nov 2023 15:35:07 -0700
Subject: [PATCH 6/9] Reword comment

---
 llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index 08ae913de124867..32acdca6062e714 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -337,8 +337,9 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
             PInfo.AvailableOut.getVXRMImm() ==
                 BBInfo.AnticipatedIn.getVXRMImm())
           continue;
-        // If the predecessor anticipated this value for all its succesors,
-        // then it should have already inserted.
+        // If the predecessor anticipates this value for all its successors,
+        // then a write to VXRM would have already occurred before this block is
+        // executed.
         if (PInfo.AnticipatedOut.isStatic() &&
             PInfo.AnticipatedOut.getVXRMImm() ==
                 BBInfo.AnticipatedIn.getVXRMImm())

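The reworded comment describes one leg of the predecessor scan in emitWriteVXRM: a write at the top of a block is needed only if some predecessor neither leaves the wanted value available on exit nor anticipates it for all of its successors (in the latter case the write already happened upstream). A standalone restatement of that test, with assumed field names and the same -1/0..3 encoding as the earlier sketches:

  #include <vector>

  struct BlockFacts {
    int AvailableOut = -1;   // -1 here: not a known static value
    int AnticipatedOut = -1;
    std::vector<int> Preds;
  };

  // True if block B must write Imm at its start: some predecessor neither
  // provides Imm on exit nor is guaranteed to have written it already.
  static bool needInsertAtEntry(const std::vector<BlockFacts> &CFG, int B,
                                int Imm) {
    if (CFG[B].Preds.empty()) // entry block: nothing upstream provides it
      return true;
    for (int P : CFG[B].Preds) {
      if (CFG[P].AvailableOut == Imm)
        continue; // predecessor already leaves vxrm == Imm
      if (CFG[P].AnticipatedOut == Imm)
        continue; // anticipated for all of P's successors, so the write
                  // happened at or before P
      return true;
    }
    return false;
  }

  int main() {
    std::vector<BlockFacts> CFG(2);
    CFG[1].Preds = {0};
    CFG[0].AnticipatedOut = 2; // upstream write is guaranteed
    return needInsertAtEntry(CFG, 1, 2) ? 1 : 0; // expect 0: no insert
  }
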
>From 8faa4300e261883b1b08b7ce0958991f0c5a2d0f Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 1 Nov 2023 15:44:54 -0700
Subject: [PATCH 7/9] Add modifiesRegister to computeVXRMChanges

---
 llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index 32acdca6062e714..82733da50342257 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -216,7 +216,7 @@ bool RISCVInsertWriteVXRM::computeVXRMChanges(const MachineBasicBlock &MBB) {
       NeedVXRMWrite = true;
     }
 
-    if (MI.isCall() || MI.isInlineAsm()) {
+    if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VXRM)) {
       if (!BBInfo.VXRMUse.isValid())
         BBInfo.VXRMUse.setUnknown();
 

>From da77cf1c5da7cd68a1afe232b30e92178400572d Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 1 Nov 2023 15:51:02 -0700
Subject: [PATCH 8/9] [RISCV] Add check for WriteVXRMImm to computeVXRMChanges.

---
 llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index 82733da50342257..af8d4818a962c48 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -214,6 +214,12 @@ bool RISCVInsertWriteVXRM::computeVXRMChanges(const MachineBasicBlock &MBB) {
 
       BBInfo.VXRMOut.setVXRMImm(NewVXRMImm);
       NeedVXRMWrite = true;
+      continue;
+    }
+
+    if (MI.getOpcode() == RISCV::WriteVXRMImm) {
+      BBInfo.VXRMOut.setVXRMImm(MI.getOperand(0).getImm());
+      continue;
     }
 
     if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VXRM)) {

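Taken together with the previous patch, the ordering of these checks is the point: a WriteVXRMImm itself modifies VXRM, so if the generic MI.modifiesRegister(RISCV::VXRM) test ran first, explicit writes would degrade the block state to Unknown instead of recording a known immediate. A small sketch of that transfer-function ordering over an assumed per-instruction summary (the real pass inspects MachineInstrs):

  // Hypothetical per-instruction summary; field names are illustrative.
  struct Instr {
    int StaticRM = -1;         // rounding-mode operand of a vxrm user, if any
    bool IsWriteVXRMImm = false;
    int Imm = 0;
    bool ClobbersVXRM = false; // call, inline asm, or any other def of vxrm
  };

  // State encoding as in the earlier sketches: -1 = unknown, 0..3 = known.
  static int transfer(int State, const Instr &I) {
    if (I.StaticRM >= 0)
      return I.StaticRM; // a vxrm user with a static RM pins the value
    if (I.IsWriteVXRMImm)
      return I.Imm;      // explicit write: known, matched before the clobber test
    if (I.ClobbersVXRM)
      return -1;         // conservatively unknown
    return State;
  }

  int main() {
    Instr Write;
    Write.IsWriteVXRMImm = true;
    Write.Imm = 2;
    Write.ClobbersVXRM = true; // it does modify VXRM, hence the ordering
    Instr Call;
    Call.ClobbersVXRM = true;
    // The write stays a known value; the call invalidates it.
    return (transfer(-1, Write) == 2 && transfer(2, Call) == -1) ? 0 : 1;
  }
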
>From 4ea78b41dbc3541e016da6c9190912dac637bb07 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 1 Nov 2023 16:49:53 -0700
Subject: [PATCH 9/9] Change pred_empty() to isEntryBlock()

---
 llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
index af8d4818a962c48..723e6ee78007c65 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -327,8 +327,8 @@ void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
   // Insert VXRM write if anticipated and not available.
   if (BBInfo.AnticipatedIn.isStatic()) {
     bool NeedInsert = false;
-    // If there are no predecessors and the value is anticipated, insert.
-    if (MBB.pred_empty()) {
+    // If this is the entry block and the value is anticipated, insert.
+    if (MBB.isEntryBlock()) {
       NeedInsert = true;
     } else {
       // Search for any predecessors that wouldn't satisfy our requirement and


