[llvm] 014390d - [RISCV] Implement cross basic block VXRM write insertion. (#70382)

via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 2 14:09:31 PDT 2023


Author: Craig Topper
Date: 2023-11-02T14:09:27-07:00
New Revision: 014390d9377ffa7c02a27eae7dca3b4e5967aeb6

URL: https://github.com/llvm/llvm-project/commit/014390d9377ffa7c02a27eae7dca3b4e5967aeb6
DIFF: https://github.com/llvm/llvm-project/commit/014390d9377ffa7c02a27eae7dca3b4e5967aeb6.diff

LOG: [RISCV] Implement cross basic block VXRM write insertion. (#70382)

This adds a new pass to insert VXRM writes for vector instructions, with
the goal of avoiding redundant writes.

The pass runs two dataflow algorithms. The first is a forward dataflow
to calculate where a VXRM value is available. The second is a backward
dataflow to determine where a VXRM value is anticipated.

Finally, we use the results of these two dataflows to insert VXRM writes
where a value is anticipated but not available.

The pass does not split critical edges, so we aren't always able to
eliminate all redundancy.

The pass only inserts VXRM writes on paths that always require one.
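
Conceptually, the per-block insertion criterion is "anticipated on entry but
not already available from every predecessor". Below is a minimal sketch of
that check in plain C++; the names BlockState and needsWriteAtEntry are
illustrative only and are not part of the actual pass, which tracks this
information per MachineBasicBlock using the VXRMInfo lattice in the new file:

  #include <optional>

  // Per-block summary (illustrative): the VXRM immediate guaranteed to be
  // live on entry, and the VXRM immediate that every path from the block
  // entry will need before it is next overwritten.
  struct BlockState {
    std::optional<unsigned> AvailableIn;
    std::optional<unsigned> AnticipatedIn;
  };

  // Insert a csrwi vxrm at the top of the block when a specific value is
  // anticipated but the predecessors do not already guarantee that value.
  bool needsWriteAtEntry(const BlockState &BB) {
    return BB.AnticipatedIn.has_value() && BB.AvailableIn != BB.AnticipatedIn;
  }

In the actual pass the same comparison is made against each predecessor's
AvailableOut/AnticipatedOut in emitWriteVXRM below, which is also where the
critical-edge limitation mentioned above comes from.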

Added: 
    llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
    llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll

Modified: 
    llvm/lib/Target/RISCV/CMakeLists.txt
    llvm/lib/Target/RISCV/RISCV.h
    llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
    llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
    llvm/test/CodeGen/RISCV/O0-pipeline.ll
    llvm/test/CodeGen/RISCV/O3-pipeline.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index b0282b72c6a8dba..afd345e1b3ebadb 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -38,6 +38,7 @@ add_llvm_target(RISCVCodeGen
   RISCVGatherScatterLowering.cpp
   RISCVInsertVSETVLI.cpp
   RISCVInsertReadWriteCSR.cpp
+  RISCVInsertWriteVXRM.cpp
   RISCVInstrInfo.cpp
   RISCVISelDAGToDAG.cpp
   RISCVISelLowering.cpp

diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index 4e870d444120c21..ad1713cad64cfec 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -71,6 +71,9 @@ void initializeRISCVPostRAExpandPseudoPass(PassRegistry &);
 FunctionPass *createRISCVInsertReadWriteCSRPass();
 void initializeRISCVInsertReadWriteCSRPass(PassRegistry &);
 
+FunctionPass *createRISCVInsertWriteVXRMPass();
+void initializeRISCVInsertWriteVXRMPass(PassRegistry &);
+
 FunctionPass *createRISCVRedundantCopyEliminationPass();
 void initializeRISCVRedundantCopyEliminationPass(PassRegistry &);
 

diff --git a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
index 75f5ac3fbe0dd55..acd19bf1b8a162e 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertReadWriteCSR.cpp
@@ -9,7 +9,6 @@
 // of the RISC-V instructions.
 //
 // Currently the pass implements:
-// -Naive insertion of a write to vxrm before an RVV fixed-point instruction.
 // -Writing and saving frm before an RVV floating-point instruction with a
 //  static rounding mode and restores the value after.
 //
@@ -58,25 +57,11 @@ char RISCVInsertReadWriteCSR::ID = 0;
 INITIALIZE_PASS(RISCVInsertReadWriteCSR, DEBUG_TYPE,
                 RISCV_INSERT_READ_WRITE_CSR_NAME, false, false)
 
-// This function inserts a write to vxrm when encountering an RVV fixed-point
-// instruction. This function also swaps frm and restores it when encountering
-// an RVV floating point instruction with a static rounding mode.
+// This function swaps frm and restores it when encountering an RVV
+// floating-point instruction with a static rounding mode.
 bool RISCVInsertReadWriteCSR::emitWriteRoundingMode(MachineBasicBlock &MBB) {
   bool Changed = false;
   for (MachineInstr &MI : MBB) {
-    int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
-    if (VXRMIdx >= 0) {
-      unsigned VXRMImm = MI.getOperand(VXRMIdx).getImm();
-
-      Changed = true;
-
-      BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
-          .addImm(VXRMImm);
-      MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
-                                              /*IsImp*/ true));
-      continue;
-    }
-
     int FRMIdx = RISCVII::getFRMOpNum(MI.getDesc());
     if (FRMIdx < 0)
       continue;

diff --git a/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
new file mode 100644
index 000000000000000..70c2415465f8cc4
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInsertWriteVXRM.cpp
@@ -0,0 +1,460 @@
+//===-- RISCVInsertWriteVXRM.cpp - Insert Write of RISC-V VXRM CSR --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass inserts writes to the VXRM CSR as needed by vector instructions.
+// Each instruction that uses VXRM carries an operand that contains its required
+// VXRM value. This pass tries to optimize placement to avoid redundant writes
+// to VXRM.
+//
+// This is done using 2 dataflow algorithms. The first is a forward data flow
+// to calculate where a VXRM value is available. The second is a backwards
+// dataflow to determine where a VXRM value is anticipated.
+//
+// Finally, we use the results of these two dataflows to insert VXRM writes
+// where a value is anticipated, but not available.
+//
+// FIXME: This pass does not split critical edges, so there can still be some
+// redundancy.
+//
+// FIXME: If we are willing to have writes that aren't always needed, we could
+// reduce the number of VXRM writes in some cases.
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include <queue>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-insert-write-vxrm"
+#define RISCV_INSERT_WRITE_VXRM_NAME "RISC-V Insert Write VXRM Pass"
+
+namespace {
+
+class VXRMInfo {
+  uint8_t VXRMImm = 0;
+
+  enum : uint8_t {
+    Uninitialized,
+    Static,
+    Unknown,
+  } State = Uninitialized;
+
+public:
+  VXRMInfo() {}
+
+  static VXRMInfo getUnknown() {
+    VXRMInfo Info;
+    Info.setUnknown();
+    return Info;
+  }
+
+  bool isValid() const { return State != Uninitialized; }
+  void setUnknown() { State = Unknown; }
+  bool isUnknown() const { return State == Unknown; }
+
+  bool isStatic() const { return State == Static; }
+
+  void setVXRMImm(unsigned Imm) {
+    assert(Imm <= 3 && "Unexpected VXRM value");
+    VXRMImm = Imm;
+    State = Static;
+  }
+  unsigned getVXRMImm() const {
+    assert(isStatic() && VXRMImm <= 3 && "Unexpected state");
+    return VXRMImm;
+  }
+
+  bool operator==(const VXRMInfo &Other) const {
+    // Uninitialized is only equal to another Uninitialized.
+    if (State != Other.State)
+      return false;
+
+    if (isStatic())
+      return VXRMImm == Other.VXRMImm;
+
+    assert((isValid() || isUnknown()) && "Unexpected state");
+    return true;
+  }
+
+  bool operator!=(const VXRMInfo &Other) const { return !(*this == Other); }
+
+  // Calculate the VXRMInfo visible to a block assuming this and Other are
+  // both predecessors.
+  VXRMInfo intersect(const VXRMInfo &Other) const {
+    // If the new value isn't valid, ignore it.
+    if (!Other.isValid())
+      return *this;
+
+    // If this value isn't valid, this must be the first predecessor, use it.
+    if (!isValid())
+      return Other;
+
+    // If either is unknown, the result is unknown.
+    if (isUnknown() || Other.isUnknown())
+      return VXRMInfo::getUnknown();
+
+    // If we have an exact match, return this.
+    if (*this == Other)
+      return *this;
+
+    // Otherwise the result is unknown.
+    return VXRMInfo::getUnknown();
+  }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  /// Support for debugging, callable in GDB: V->dump()
+  LLVM_DUMP_METHOD void dump() const {
+    print(dbgs());
+    dbgs() << "\n";
+  }
+
+  void print(raw_ostream &OS) const {
+    OS << '{';
+    if (!isValid())
+      OS << "Uninitialized";
+    else if (isUnknown())
+      OS << "Unknown";
+    else
+      OS << getVXRMImm();
+    OS << '}';
+  }
+#endif
+};
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_ATTRIBUTE_USED
+inline raw_ostream &operator<<(raw_ostream &OS, const VXRMInfo &V) {
+  V.print(OS);
+  return OS;
+}
+#endif
+
+struct BlockData {
+  // Indicates if the block uses VXRM. Uninitialized means no use.
+  VXRMInfo VXRMUse;
+
+  // Indicates the VXRM output from the block. Uninitialized means transparent.
+  VXRMInfo VXRMOut;
+
+  // Keeps track of the available VXRM value at the start of the basic block.
+  VXRMInfo AvailableIn;
+
+  // Keeps track of the available VXRM value at the end of the basic block.
+  VXRMInfo AvailableOut;
+
+  // Keeps track of what VXRM is anticipated at the start of the basic block.
+  VXRMInfo AnticipatedIn;
+
+  // Keeps track of what VXRM is anticipated at the end of the basic block.
+  VXRMInfo AnticipatedOut;
+
+  // Keeps track of whether the block is already in the queue.
+  bool InQueue;
+
+  BlockData() = default;
+};
+
+class RISCVInsertWriteVXRM : public MachineFunctionPass {
+  const TargetInstrInfo *TII;
+
+  std::vector<BlockData> BlockInfo;
+  std::queue<const MachineBasicBlock *> WorkList;
+
+public:
+  static char ID;
+
+  RISCVInsertWriteVXRM() : MachineFunctionPass(ID) {
+    initializeRISCVInsertWriteVXRMPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override {
+    return RISCV_INSERT_WRITE_VXRM_NAME;
+  }
+
+private:
+  bool computeVXRMChanges(const MachineBasicBlock &MBB);
+  void computeAvailable(const MachineBasicBlock &MBB);
+  void computeAnticipated(const MachineBasicBlock &MBB);
+  void emitWriteVXRM(MachineBasicBlock &MBB);
+};
+
+} // end anonymous namespace
+
+char RISCVInsertWriteVXRM::ID = 0;
+
+INITIALIZE_PASS(RISCVInsertWriteVXRM, DEBUG_TYPE, RISCV_INSERT_WRITE_VXRM_NAME,
+                false, false)
+
+bool RISCVInsertWriteVXRM::computeVXRMChanges(const MachineBasicBlock &MBB) {
+  BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+
+  bool NeedVXRMWrite = false;
+  for (const MachineInstr &MI : MBB) {
+    int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
+    if (VXRMIdx >= 0) {
+      unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();
+
+      if (!BBInfo.VXRMUse.isValid())
+        BBInfo.VXRMUse.setVXRMImm(NewVXRMImm);
+
+      BBInfo.VXRMOut.setVXRMImm(NewVXRMImm);
+      NeedVXRMWrite = true;
+      continue;
+    }
+
+    if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VXRM)) {
+      if (!BBInfo.VXRMUse.isValid())
+        BBInfo.VXRMUse.setUnknown();
+
+      BBInfo.VXRMOut.setUnknown();
+    }
+  }
+
+  return NeedVXRMWrite;
+}
+
+void RISCVInsertWriteVXRM::computeAvailable(const MachineBasicBlock &MBB) {
+  BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+
+  BBInfo.InQueue = false;
+
+  VXRMInfo Available;
+  if (MBB.pred_empty()) {
+    Available.setUnknown();
+  } else {
+    for (const MachineBasicBlock *P : MBB.predecessors())
+      Available = Available.intersect(BlockInfo[P->getNumber()].AvailableOut);
+  }
+
+  // If we don't have any valid available info, wait until we do.
+  if (!Available.isValid())
+    return;
+
+  if (Available != BBInfo.AvailableIn) {
+    BBInfo.AvailableIn = Available;
+    LLVM_DEBUG(dbgs() << "AvailableIn state of " << printMBBReference(MBB)
+                      << " changed to " << BBInfo.AvailableIn << "\n");
+  }
+
+  if (BBInfo.VXRMOut.isValid())
+    Available = BBInfo.VXRMOut;
+
+  if (Available == BBInfo.AvailableOut)
+    return;
+
+  BBInfo.AvailableOut = Available;
+  LLVM_DEBUG(dbgs() << "AvailableOut state of " << printMBBReference(MBB)
+                    << " changed to " << BBInfo.AvailableOut << "\n");
+
+  // Add the successors to the work list so that we can propagate.
+  for (MachineBasicBlock *S : MBB.successors()) {
+    if (!BlockInfo[S->getNumber()].InQueue) {
+      BlockInfo[S->getNumber()].InQueue = true;
+      WorkList.push(S);
+    }
+  }
+}
+
+void RISCVInsertWriteVXRM::computeAnticipated(const MachineBasicBlock &MBB) {
+  BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+
+  BBInfo.InQueue = false;
+
+  VXRMInfo Anticipated;
+  if (MBB.succ_empty()) {
+    Anticipated.setUnknown();
+  } else {
+    for (const MachineBasicBlock *S : MBB.successors())
+      Anticipated =
+          Anticipated.intersect(BlockInfo[S->getNumber()].AnticipatedIn);
+  }
+
+  // If we don't have any valid anticipated info, wait until we do.
+  if (!Anticipated.isValid())
+    return;
+
+  if (Anticipated != BBInfo.AnticipatedOut) {
+    BBInfo.AnticipatedOut = Anticipated;
+    LLVM_DEBUG(dbgs() << "AnticipatedOut state of " << printMBBReference(MBB)
+                      << " changed to " << BBInfo.AnticipatedOut << "\n");
+  }
+
+  // If this block reads VXRM, copy it.
+  if (BBInfo.VXRMUse.isValid())
+    Anticipated = BBInfo.VXRMUse;
+
+  if (Anticipated == BBInfo.AnticipatedIn)
+    return;
+
+  BBInfo.AnticipatedIn = Anticipated;
+  LLVM_DEBUG(dbgs() << "AnticipatedIn state of " << printMBBReference(MBB)
+                    << " changed to " << BBInfo.AnticipatedIn << "\n");
+
+  // Add the predecessors to the work list so that we can propagate.
+  for (MachineBasicBlock *P : MBB.predecessors()) {
+    if (!BlockInfo[P->getNumber()].InQueue) {
+      BlockInfo[P->getNumber()].InQueue = true;
+      WorkList.push(P);
+    }
+  }
+}
+
+void RISCVInsertWriteVXRM::emitWriteVXRM(MachineBasicBlock &MBB) {
+  const BlockData &BBInfo = BlockInfo[MBB.getNumber()];
+
+  VXRMInfo Info = BBInfo.AvailableIn;
+
+  // Flag to indicate that we need to insert a VXRM write. We want to delay it as
+  // late as possible in this block.
+  bool PendingInsert = false;
+
+  // Insert VXRM write if anticipated and not available.
+  if (BBInfo.AnticipatedIn.isStatic()) {
+    // If this is the entry block and the value is anticipated, insert.
+    if (MBB.isEntryBlock()) {
+      PendingInsert = true;
+    } else {
+      // Search for any predecessors that wouldn't satisfy our requirement and
+      // insert a write VXRM if needed.
+      // NOTE: If one predecessor is able to provide the requirement, but
+      // another isn't, it means we have a critical edge. The better placement
+      // would be to split the critical edge.
+      for (MachineBasicBlock *P : MBB.predecessors()) {
+        const BlockData &PInfo = BlockInfo[P->getNumber()];
+        // If it's available out of the predecessor, then we're ok.
+        if (PInfo.AvailableOut.isStatic() &&
+            PInfo.AvailableOut.getVXRMImm() ==
+                BBInfo.AnticipatedIn.getVXRMImm())
+          continue;
+        // If the predecessor anticipates this value for all its successors,
+        // then a write to VXRM would have already occurred before this block is
+        // executed.
+        if (PInfo.AnticipatedOut.isStatic() &&
+            PInfo.AnticipatedOut.getVXRMImm() ==
+                BBInfo.AnticipatedIn.getVXRMImm())
+          continue;
+        PendingInsert = true;
+        break;
+      }
+    }
+
+    Info = BBInfo.AnticipatedIn;
+  }
+
+  for (MachineInstr &MI : MBB) {
+    int VXRMIdx = RISCVII::getVXRMOpNum(MI.getDesc());
+    if (VXRMIdx >= 0) {
+      unsigned NewVXRMImm = MI.getOperand(VXRMIdx).getImm();
+
+      if (PendingInsert || !Info.isStatic() ||
+          Info.getVXRMImm() != NewVXRMImm) {
+        assert((!PendingInsert ||
+                (Info.isStatic() && Info.getVXRMImm() == NewVXRMImm)) &&
+               "Pending VXRM insertion mismatch");
+        LLVM_DEBUG(dbgs() << "Inserting before "; MI.print(dbgs()));
+        BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(RISCV::WriteVXRMImm))
+            .addImm(NewVXRMImm);
+        PendingInsert = false;
+      }
+
+      MI.addOperand(MachineOperand::CreateReg(RISCV::VXRM, /*IsDef*/ false,
+                                              /*IsImp*/ true));
+      Info.setVXRMImm(NewVXRMImm);
+      continue;
+    }
+
+    if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VXRM))
+      Info.setUnknown();
+  }
+
+  // If all our successors anticipate a value, do the insert.
+  // NOTE: It's possible that not all predecessors of our successor provide the
+  // correct value. This can occur on critical edges. If we don't split the
+  // critical edge, we'll also have a VXRM write in the successor that is
+  // redundant with this one.
+  if (PendingInsert ||
+      (BBInfo.AnticipatedOut.isStatic() &&
+       (!Info.isStatic() ||
+        Info.getVXRMImm() != BBInfo.AnticipatedOut.getVXRMImm()))) {
+    assert((!PendingInsert ||
+            (Info.isStatic() && BBInfo.AnticipatedOut.isStatic() &&
+             Info.getVXRMImm() == BBInfo.AnticipatedOut.getVXRMImm())) &&
+           "Pending VXRM insertion mismatch");
+    LLVM_DEBUG(dbgs() << "Inserting at end of " << printMBBReference(MBB)
+                      << " changing to " << BBInfo.AnticipatedOut << "\n");
+    BuildMI(MBB, MBB.getFirstTerminator(), DebugLoc(),
+            TII->get(RISCV::WriteVXRMImm))
+        .addImm(BBInfo.AnticipatedOut.getVXRMImm());
+  }
+}
+
+bool RISCVInsertWriteVXRM::runOnMachineFunction(MachineFunction &MF) {
+  // Skip if the vector extension is not enabled.
+  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+  if (!ST.hasVInstructions())
+    return false;
+
+  TII = ST.getInstrInfo();
+
+  assert(BlockInfo.empty() && "Expect empty block infos");
+  BlockInfo.resize(MF.getNumBlockIDs());
+
+  // Phase 1 - collect block information.
+  bool NeedVXRMChange = false;
+  for (const MachineBasicBlock &MBB : MF)
+    NeedVXRMChange |= computeVXRMChanges(MBB);
+
+  if (!NeedVXRMChange) {
+    BlockInfo.clear();
+    return false;
+  }
+
+  // Phase 2 - Compute available VXRM using a forward walk.
+  for (const MachineBasicBlock &MBB : MF) {
+    WorkList.push(&MBB);
+    BlockInfo[MBB.getNumber()].InQueue = true;
+  }
+  while (!WorkList.empty()) {
+    const MachineBasicBlock &MBB = *WorkList.front();
+    WorkList.pop();
+    computeAvailable(MBB);
+  }
+
+  // Phase 3 - Compute anticipated VXRM using a backwards walk.
+  for (const MachineBasicBlock &MBB : llvm::reverse(MF)) {
+    WorkList.push(&MBB);
+    BlockInfo[MBB.getNumber()].InQueue = true;
+  }
+  while (!WorkList.empty()) {
+    const MachineBasicBlock &MBB = *WorkList.front();
+    WorkList.pop();
+    computeAnticipated(MBB);
+  }
+
+  // Phase 4 - Emit VXRM writes at the earliest place possible.
+  for (MachineBasicBlock &MBB : MF)
+    emitWriteVXRM(MBB);
+
+  BlockInfo.clear();
+
+  return true;
+}
+
+FunctionPass *llvm::createRISCVInsertWriteVXRMPass() {
+  return new RISCVInsertWriteVXRM();
+}

diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 85683a3adc968df..15b66f13909b6d9 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -104,6 +104,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   initializeRISCVFoldMasksPass(*PR);
   initializeRISCVInsertVSETVLIPass(*PR);
   initializeRISCVInsertReadWriteCSRPass(*PR);
+  initializeRISCVInsertWriteVXRMPass(*PR);
   initializeRISCVDAGToDAGISelPass(*PR);
   initializeRISCVInitUndefPass(*PR);
   initializeRISCVMoveMergePass(*PR);
@@ -436,6 +437,7 @@ void RISCVPassConfig::addPreRegAlloc() {
       EnableRISCVDeadRegisterElimination)
     addPass(createRISCVDeadRegisterDefinitionsPass());
   addPass(createRISCVInsertReadWriteCSRPass());
+  addPass(createRISCVInsertWriteVXRMPass());
 }
 
 void RISCVPassConfig::addOptimizedRegAlloc() {

diff --git a/llvm/test/CodeGen/RISCV/O0-pipeline.ll b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
index 1d9af9df2f718f0..e01d2d45263434e 100644
--- a/llvm/test/CodeGen/RISCV/O0-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
@@ -42,6 +42,7 @@
 ; CHECK-NEXT:       RISC-V Pre-RA pseudo instruction expansion pass
 ; CHECK-NEXT:       RISC-V Insert VSETVLI pass
 ; CHECK-NEXT:       RISC-V Insert Read/Write CSR Pass
+; CHECK-NEXT:       RISC-V Insert Write VXRM Pass
 ; CHECK-NEXT:       RISC-V init undef pass
 ; CHECK-NEXT:       Eliminate PHI nodes for register allocation
 ; CHECK-NEXT:       Two-Address instruction pass

diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index 414b721661021fd..5945997bf9507e5 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -111,6 +111,7 @@
 ; CHECK-NEXT:       RISC-V Insert VSETVLI pass
 ; CHECK-NEXT:       RISC-V Dead register definitions
 ; CHECK-NEXT:       RISC-V Insert Read/Write CSR Pass
+; CHECK-NEXT:       RISC-V Insert Write VXRM Pass
 ; CHECK-NEXT:       Detect Dead Lanes
 ; CHECK-NEXT:       RISC-V init undef pass
 ; CHECK-NEXT:       Process Implicit Definitions

diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
new file mode 100644
index 000000000000000..e323bc69ee90085
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
@@ -0,0 +1,549 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen, iXLen);
+declare <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen, iXLen);
+
+; Test same rounding mode in one block.
+define <vscale x 1 x i8> @test1(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: test1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %b
+}
+
+; Test different rounding mode.
+define <vscale x 1 x i8> @test2(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: test2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 2, iXLen %3)
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %b
+}
+
+declare <vscale x 1 x i8> @foo(<vscale x 1 x i8>)
+
+; Test same vxrm with call in between which may invalidate vxrm.
+define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; RV32-LABEL: test3:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 1
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    mv s0, a0
+; RV32-NEXT:    addi a1, sp, 16
+; RV32-NEXT:    vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; RV32-NEXT:    csrwi vxrm, 0
+; RV32-NEXT:    vaadd.vv v8, v8, v9
+; RV32-NEXT:    call foo@plt
+; RV32-NEXT:    vsetvli zero, s0, e8, mf8, ta, ma
+; RV32-NEXT:    csrwi vxrm, 0
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vaadd.vv v8, v8, v9
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: test3:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    addi sp, sp, -32
+; RV64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 1
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    mv s0, a0
+; RV64-NEXT:    addi a1, sp, 16
+; RV64-NEXT:    vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    vaadd.vv v8, v8, v9
+; RV64-NEXT:    call foo@plt
+; RV64-NEXT:    vsetvli zero, s0, e8, mf8, ta, ma
+; RV64-NEXT:    csrwi vxrm, 0
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    vaadd.vv v8, v8, v9
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 1
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 32
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  %b = call <vscale x 1 x i8> @foo(<vscale x 1 x i8> %a)
+  %c = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %c
+}
+
+; Test same vxrm with asm in between which may invalidate vxrm.
+define <vscale x 1 x i8> @test4(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: test4:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  %b = call <vscale x 1 x i8> asm "", "=^vr,0"(<vscale x 1 x i8> %a)
+  %c = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %c
+}
+
+; Test same rounding mode in triangle.
+define <vscale x 1 x i8> @test5(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test5:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    beqz a1, .LBB4_2
+; CHECK-NEXT:  # %bb.1: # %condblock
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:  .LBB4_2: # %mergeblock
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br i1 %cond, label %condblock, label %mergeblock
+
+condblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %c = phi <vscale x 1 x i8> [%a, %entry], [%b, %condblock]
+
+  ret <vscale x 1 x i8> %c
+}
+
+; Test same rounding mode in diamond with no dominating vxrm.
+define <vscale x 1 x i8> @test6(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test6:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    beqz a1, .LBB5_2
+; CHECK-NEXT:  # %bb.1: # %trueblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB5_2: # %falseblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  br i1 %cond, label %trueblock, label %falseblock
+
+trueblock:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+falseblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %c = phi <vscale x 1 x i8> [%a, %trueblock], [%b, %falseblock]
+
+  ret <vscale x 1 x i8> %c
+}
+
+; Test same rounding mode in diamond with same dominating vxrm.
+define <vscale x 1 x i8> @test7(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    beqz a1, .LBB6_2
+; CHECK-NEXT:  # %bb.1: # %trueblock
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB6_2: # %falseblock
+; CHECK-NEXT:    vasub.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br i1 %cond, label %trueblock, label %falseblock
+
+trueblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+falseblock:
+  %c = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %d = phi <vscale x 1 x i8> [%b, %trueblock], [%c, %falseblock]
+
+  ret <vscale x 1 x i8> %d
+}
+
+; Test same rounding mode in diamond with same vxrm at merge.
+define <vscale x 1 x i8> @test8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    beqz a1, .LBB7_2
+; CHECK-NEXT:  # %bb.1: # %trueblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB7_2: # %falseblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vasub.vv v8, v8, v9
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  br i1 %cond, label %trueblock, label %falseblock
+
+trueblock:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+falseblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %c = phi <vscale x 1 x i8> [%a, %trueblock], [%b, %falseblock]
+  %d = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %c,
+    <vscale x 1 x i8> %2,
+    iXLen 0, iXLen %3)
+
+  ret <vscale x 1 x i8> %d
+}
+
+; Test same rounding mode in diamond with different vxrm at merge.
+define <vscale x 1 x i8> @test9(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3, i1 %cond) nounwind {
+; CHECK-LABEL: test9:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    beqz a1, .LBB8_2
+; CHECK-NEXT:  # %bb.1: # %trueblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    j .LBB8_3
+; CHECK-NEXT:  .LBB8_2: # %falseblock
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vasub.vv v8, v8, v9
+; CHECK-NEXT:  .LBB8_3: # %mergeblock
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  br i1 %cond, label %trueblock, label %falseblock
+
+trueblock:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+falseblock:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen 0, iXLen %3)
+  br label %mergeblock
+
+mergeblock:
+  %c = phi <vscale x 1 x i8> [%a, %trueblock], [%b, %falseblock]
+  %d = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %c,
+    <vscale x 1 x i8> %2,
+    iXLen 2, iXLen %3)
+
+  ret <vscale x 1 x i8> %d
+}
+
+; Test loop with no dominating vxrm write.
+define void @test10(i8* nocapture %ptr_dest, i8* nocapture readonly %ptr_op1, i8* nocapture readonly %ptr_op2, iXLen %n) {
+; CHECK-LABEL: test10:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    beqz a3, .LBB9_3
+; CHECK-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:  .LBB9_2: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vsetvli a4, a3, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a2)
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    sub a3, a3, a4
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    bnez a3, .LBB9_2
+; CHECK-NEXT:  .LBB9_3: # %for.end
+; CHECK-NEXT:    ret
+entry:
+  %tobool.not9 = icmp eq iXLen %n, 0
+  br i1 %tobool.not9, label %for.end, label %for.body
+
+for.body:
+  %n.addr.011 = phi iXLen [ %n, %entry ], [ %sub, %for.body ]
+  %vl = tail call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %n.addr.011, iXLen 0, iXLen 5)
+  %load1 = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op1, iXLen %vl)
+  %load2 = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op2, iXLen %vl)
+  %vadd = tail call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %load1, <vscale x 1 x i8> %load2, iXLen 2, iXLen %vl)
+  tail call void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8> %vadd, ptr %ptr_dest, iXLen %vl)
+  %sub = sub iXLen %n.addr.011, %vl
+  %tobool.not = icmp eq iXLen %sub, 0
+  br i1 %tobool.not, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
+
+declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen immarg, iXLen immarg)
+declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8>, <vscale x 1 x i8>* nocapture, iXLen)
+declare void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8>, <vscale x 1 x i8>* nocapture, iXLen)
+
+; Test loop with dominating vxrm write. Make sure there is no write in the loop.
+define void @test11(i8* nocapture %ptr_dest, i8* nocapture readonly %ptr_op1, i8* nocapture readonly %ptr_op2, iXLen %n) {
+; CHECK-LABEL: test11:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a4, a3, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a2)
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:  .LBB10_1: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    sub a3, a3, a4
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    beqz a3, .LBB10_3
+; CHECK-NEXT:  # %bb.2: # %for.body
+; CHECK-NEXT:    # in Loop: Header=BB10_1 Depth=1
+; CHECK-NEXT:    vsetvli a4, a3, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v9, (a2)
+; CHECK-NEXT:    j .LBB10_1
+; CHECK-NEXT:  .LBB10_3: # %for.end
+; CHECK-NEXT:    ret
+entry:
+  %vl = tail call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %n, iXLen 0, iXLen 5)
+  %load1a = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op1, iXLen %vl)
+  %load2a = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op2, iXLen %vl)
+  %vadda = tail call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %load1a, <vscale x 1 x i8> %load2a, iXLen 2, iXLen %vl)
+  tail call void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8> %vadda, ptr %ptr_dest, iXLen %vl)
+  %suba = sub iXLen %n, %vl
+  %tobool.not9 = icmp eq iXLen %suba, 0
+  br i1 %tobool.not9, label %for.end, label %for.body
+
+for.body:
+  %n.addr.011 = phi iXLen [ %suba, %entry ], [ %sub, %for.body ]
+  %vl2 = tail call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %n.addr.011, iXLen 0, iXLen 5)
+  %load1 = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op1, iXLen %vl2)
+  %load2 = tail call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.iXLen(<vscale x 1 x i8> undef, ptr %ptr_op2, iXLen %vl2)
+  %vadd = tail call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %load1, <vscale x 1 x i8> %load2, iXLen 2, iXLen %vl2)
+  tail call void @llvm.riscv.vse.nxv1i8.iXLen(<vscale x 1 x i8> %vadd, ptr %ptr_dest, iXLen %vl2)
+  %sub = sub iXLen %n.addr.011, %vl2
+  %tobool.not = icmp eq iXLen %sub, 0
+  br i1 %tobool.not, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
+
+; The edge from entry to block2 is a critical edge. The vxrm write in block2
+; is redundant when coming from block1, but is needed when coming from entry.
+; FIXME: We could remove the write from the end of block1 without splitting the
+; critical edge.
+define <vscale x 1 x i8> @test12(i1 %c1, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %vl) {
+; CHECK-LABEL: test12:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v9, v8, v9
+; CHECK-NEXT:    beqz a0, .LBB11_2
+; CHECK-NEXT:  # %bb.1: # %block1
+; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vaadd.vv v9, v8, v9
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:  .LBB11_2: # %block2
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen 0, iXLen %vl)
+  br i1 %c1, label %block1, label %block2
+
+block1:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %a, iXLen 1, iXLen %vl)
+  br label %block2
+
+block2:
+  %c = phi <vscale x 1 x i8> [ %a, %entry ], [ %b, %block1]
+  %d = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %c, iXLen 2, iXLen %vl)
+  ret <vscale x 1 x i8> %d
+}
+
+; Similar to test12, but introduces a second critical edge from block1 to
+; block3. Now the write to vxrm at the end of block1 can't be removed because
+; it is needed by block3.
+define <vscale x 1 x i8> @test13(i1 %c1, i1 %c2, i1 %c3, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %vl) {
+; CHECK-LABEL: test13:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli zero, a3, e8, mf8, ta, ma
+; CHECK-NEXT:    csrwi vxrm, 0
+; CHECK-NEXT:    vaadd.vv v10, v8, v9
+; CHECK-NEXT:    beqz a0, .LBB12_2
+; CHECK-NEXT:  # %bb.1: # %block1
+; CHECK-NEXT:    csrwi vxrm, 1
+; CHECK-NEXT:    vaadd.vv v10, v8, v10
+; CHECK-NEXT:    andi a1, a1, 1
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    beqz a1, .LBB12_3
+; CHECK-NEXT:  .LBB12_2: # %block2
+; CHECK-NEXT:    csrwi vxrm, 2
+; CHECK-NEXT:    vaadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB12_3: # %block3
+; CHECK-NEXT:    vaadd.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen 0, iXLen %vl)
+  br i1 %c1, label %block1, label %block2
+
+block1:
+  %b = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %a, iXLen 1, iXLen %vl)
+  br i1 %c2, label %block2, label %block3
+
+block2:
+  %c = phi <vscale x 1 x i8> [ %a, %entry ], [ %b, %block1]
+  %d = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %c, iXLen 2, iXLen %vl)
+  ret <vscale x 1 x i8> %d
+
+block3:
+  %e = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %1, <vscale x 1 x i8> %b, iXLen 2, iXLen %vl)
+  ret <vscale x 1 x i8> %e
+}
