[llvm] [LoongArch] Optimize *W Instructions at MI level (PR #90463)

via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 29 06:02:37 PDT 2024


https://github.com/heiher created https://github.com/llvm/llvm-project/pull/90463

Following RISC-V, this adds an MI-level pass to optimize *W instructions for LoongArch.

First, it removes unneeded sext.w (addi.w rd, rs, 0) instructions, either because the sign-extended bits aren't consumed or because the input was already sign-extended by an earlier instruction.
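
For example (a hand-written sketch, not pass output; registers are illustrative):

  add.w  $a0, $a0, $a1      # add.w already sign-extends its 32-bit result
  addi.w $a0, $a0, 0        # redundant sext.w idiom, removed by the pass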

Then:
1. Unless explicitly disabled or the target prefers instructions with the W suffix, it removes the .w suffix from an op.w instruction whenever all users depend only on the lower word of the instruction's result. The cases handled are:
   * addi.w, because it helps reduce test differences between LA32 and LA64 without being a pessimization.

2. Or, if explicitly enabled or the target prefers instructions with the W suffix, it adds the W suffix to an instruction whenever all users depend only on the lower word of the instruction's result (a sketch of both rewrites follows this list). The cases handled are:
   * add.d/addi.d/sub.d/mul.d.
   * slli.d with imm < 32.
   * ld.d/ld.wu.
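
A hand-written sketch of both rewrites (registers and immediates are illustrative; the first matches the add.ll test updates below, and the second assumes the prefer-w-inst tune feature added in this patch):

   # Strip (default): the only user is the second addi.w, which reads just
   # bits 31:0, so the first addi.w can be relaxed to addi.d.
   addi.w $a0, $a0, 2047          addi.d $a0, $a0, 2047
   addi.w $a0, $a0, 2033    =>    addi.w $a0, $a0, 2033

   # Append (with prefer-w-inst): all users demand only the lower word
   # (slli.w reads just bits 31:0 of $a0), so the D-form becomes the W-form.
   add.d  $a0, $a0, $a1           add.w  $a0, $a0, $a1
   slli.w $a0, $a0, 2       =>    slli.w $a0, $a0, 2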

From 495f096a2f18480712e682243a5d1d847e871d3f Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Mon, 29 Apr 2024 16:28:57 +0800
Subject: [PATCH] [LoongArch] Optimize *W Instructions at MI level

Following RISC-V, this adds an MI-level pass to optimize *W instructions
for LoongArch.

First, it removes unneeded sext.w (addi.w rd, rs, 0) instructions, either
because the sign-extended bits aren't consumed or because the input was
already sign-extended by an earlier instruction.

Then:
1. Unless explicitly disabled or the target prefers instructions with the W
   suffix, it removes the .w suffix from an op.w instruction whenever all
   users depend only on the lower word of the instruction's result.
   The cases handled are:
   * addi.w, because it helps reduce test differences between LA32 and LA64
     without being a pessimization.

2. Or, if explicitly enabled or the target prefers instructions with the W
   suffix, it adds the W suffix to an instruction whenever all users depend
   only on the lower word of the instruction's result.
   The cases handled are:
   * add.d/addi.d/sub.d/mul.d.
   * slli.d with imm < 32.
   * ld.d/ld.wu.
---
 llvm/lib/Target/LoongArch/CMakeLists.txt      |   1 +
 llvm/lib/Target/LoongArch/LoongArch.h         |   2 +
 llvm/lib/Target/LoongArch/LoongArch.td        |   3 +
 .../LoongArch/LoongArchISelLowering.cpp       |  18 +-
 .../Target/LoongArch/LoongArchInstrInfo.cpp   |   6 +
 .../lib/Target/LoongArch/LoongArchInstrInfo.h |   3 +
 .../LoongArch/LoongArchMachineFunctionInfo.h  |   9 +
 .../Target/LoongArch/LoongArchOptWInstrs.cpp  | 815 ++++++++++++++++++
 .../LoongArch/LoongArchTargetMachine.cpp      |  10 +
 .../LoongArch/atomicrmw-uinc-udec-wrap.ll     | 242 +++---
 llvm/test/CodeGen/LoongArch/gep-imm.ll        |   8 +-
 .../CodeGen/LoongArch/ir-instruction/add.ll   |  12 +-
 .../ir-instruction/atomic-cmpxchg.ll          | 146 ++--
 .../LoongArch/ir-instruction/atomicrmw-fp.ll  |  40 -
 .../ir-instruction/atomicrmw-minmax.ll        |  80 --
 .../LoongArch/ir-instruction/atomicrmw.ll     |  80 --
 .../CodeGen/LoongArch/preferred-alignments.ll |   6 +-
 llvm/test/CodeGen/LoongArch/sextw-removal.ll  | 586 ++++++++++++-
 .../llvm/lib/Target/LoongArch/BUILD.gn        |   1 +
 19 files changed, 1604 insertions(+), 464 deletions(-)
 create mode 100644 llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp

diff --git a/llvm/lib/Target/LoongArch/CMakeLists.txt b/llvm/lib/Target/LoongArch/CMakeLists.txt
index 5fb8b60be6c660..5085e23f82a7b6 100644
--- a/llvm/lib/Target/LoongArch/CMakeLists.txt
+++ b/llvm/lib/Target/LoongArch/CMakeLists.txt
@@ -23,6 +23,7 @@ add_llvm_target(LoongArchCodeGen
   LoongArchISelDAGToDAG.cpp
   LoongArchISelLowering.cpp
   LoongArchMCInstLower.cpp
+  LoongArchOptWInstrs.cpp
   LoongArchRegisterInfo.cpp
   LoongArchSubtarget.cpp
   LoongArchTargetMachine.cpp
diff --git a/llvm/lib/Target/LoongArch/LoongArch.h b/llvm/lib/Target/LoongArch/LoongArch.h
index 09ca089c91151b..2109176d499820 100644
--- a/llvm/lib/Target/LoongArch/LoongArch.h
+++ b/llvm/lib/Target/LoongArch/LoongArch.h
@@ -35,10 +35,12 @@ bool lowerLoongArchMachineOperandToMCOperand(const MachineOperand &MO,
 
 FunctionPass *createLoongArchExpandAtomicPseudoPass();
 FunctionPass *createLoongArchISelDag(LoongArchTargetMachine &TM);
+FunctionPass *createLoongArchOptWInstrsPass();
 FunctionPass *createLoongArchPreRAExpandPseudoPass();
 FunctionPass *createLoongArchExpandPseudoPass();
 void initializeLoongArchDAGToDAGISelPass(PassRegistry &);
 void initializeLoongArchExpandAtomicPseudoPass(PassRegistry &);
+void initializeLoongArchOptWInstrsPass(PassRegistry &);
 void initializeLoongArchPreRAExpandPseudoPass(PassRegistry &);
 void initializeLoongArchExpandPseudoPass(PassRegistry &);
 } // end namespace llvm
diff --git a/llvm/lib/Target/LoongArch/LoongArch.td b/llvm/lib/Target/LoongArch/LoongArch.td
index c2a669931d78fe..8a628157c6018d 100644
--- a/llvm/lib/Target/LoongArch/LoongArch.td
+++ b/llvm/lib/Target/LoongArch/LoongArch.td
@@ -117,6 +117,9 @@ def FeatureFrecipe
                        "Support frecipe.{s/d} and frsqrte.{s/d} instructions.">;
 def HasFrecipe : Predicate<"Subtarget->hasFrecipe()">;
 
+def TunePreferWInst
+    : SubtargetFeature<"prefer-w-inst", "PreferWInst", "true",
+                       "Prefer instructions with W suffix">;
 
 //===----------------------------------------------------------------------===//
 // Registers, instruction descriptions ...
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 285d5c2a63b2da..c59385b3d8125c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -3746,6 +3746,7 @@ static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
 
 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                 const CCValAssign &VA, const SDLoc &DL,
+                                const ISD::InputArg &In,
                                 const LoongArchTargetLowering &TLI) {
   MachineFunction &MF = DAG.getMachineFunction();
   MachineRegisterInfo &RegInfo = MF.getRegInfo();
@@ -3756,6 +3757,21 @@ static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
   RegInfo.addLiveIn(VA.getLocReg(), VReg);
   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
 
+  // If input is sign extended from 32 bits, note it for the OptW pass.
+  if (In.isOrigArg()) {
+    Argument *OrigArg = MF.getFunction().getArg(In.getOrigArgIndex());
+    if (OrigArg->getType()->isIntegerTy()) {
+      unsigned BitWidth = OrigArg->getType()->getIntegerBitWidth();
+      // An input zero extended from i31 can also be considered sign extended.
+      if ((BitWidth <= 32 && In.Flags.isSExt()) ||
+          (BitWidth < 32 && In.Flags.isZExt())) {
+        LoongArchMachineFunctionInfo *LAFI =
+            MF.getInfo<LoongArchMachineFunctionInfo>();
+        LAFI->addSExt32Register(VReg);
+      }
+    }
+  }
+
   return convertLocVTToValVT(DAG, Val, VA, DL);
 }
 
@@ -3887,7 +3903,7 @@ SDValue LoongArchTargetLowering::LowerFormalArguments(
     CCValAssign &VA = ArgLocs[i];
     SDValue ArgValue;
     if (VA.isRegLoc())
-      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
+      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, Ins[i], *this);
     else
       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
     if (VA.getLocInfo() == CCValAssign::Indirect) {
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
index 6576100d3b3218..a0be29a2f744fd 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
@@ -534,3 +534,9 @@ LoongArchInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
       {MO_GD_PC_HI, "loongarch-gd-pc-hi"}};
   return ArrayRef(TargetFlags);
 }
+
+// Returns true if this is the sext.w pattern, addi.w rd, rs, 0.
+bool LoongArch::isSEXT_W(const MachineInstr &MI) {
+  return MI.getOpcode() == LoongArch::ADDI_W && MI.getOperand(1).isReg() &&
+         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
+}
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
index 4b145d0baa4171..3b80f55bc84fc5 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
@@ -90,6 +90,9 @@ class LoongArchInstrInfo : public LoongArchGenInstrInfo {
 
 namespace LoongArch {
 
+// Returns true if this is the sext.w pattern, addi.w rd, rs, 0.
+bool isSEXT_W(const MachineInstr &MI);
+
 // Mask assignments for floating-point.
 static constexpr unsigned FClassMaskSignalingNaN = 0x001;
 static constexpr unsigned FClassMaskQuietNaN = 0x002;
diff --git a/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h b/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
index 0d819154a89c9e..a7366a5dba0412 100644
--- a/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchMachineFunctionInfo.h
@@ -36,6 +36,9 @@ class LoongArchMachineFunctionInfo : public MachineFunctionInfo {
   /// insertIndirectBranch.
   int BranchRelaxationSpillFrameIndex = -1;
 
+  /// Registers that have been sign extended from i32.
+  SmallVector<Register, 8> SExt32Registers;
+
 public:
   LoongArchMachineFunctionInfo(const Function &F,
                                const TargetSubtargetInfo *STI) {}
@@ -62,6 +65,12 @@ class LoongArchMachineFunctionInfo : public MachineFunctionInfo {
   void setBranchRelaxationSpillFrameIndex(int Index) {
     BranchRelaxationSpillFrameIndex = Index;
   }
+
+  void addSExt32Register(Register Reg) { SExt32Registers.push_back(Reg); }
+
+  bool isSExt32Register(Register Reg) const {
+    return is_contained(SExt32Registers, Reg);
+  }
 };
 
 } // end namespace llvm
diff --git a/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp b/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp
new file mode 100644
index 00000000000000..677ba11c17d516
--- /dev/null
+++ b/llvm/lib/Target/LoongArch/LoongArchOptWInstrs.cpp
@@ -0,0 +1,815 @@
+//===- LoongArchOptWInstrs.cpp - MI W instruction optimizations ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This pass does some optimizations for *W instructions at the MI level.
+//
+// First, it removes unneeded sext.w (addi.w rd, rs, 0) instructions, either
+// because the sign-extended bits aren't consumed or because the input was
+// already sign-extended by an earlier instruction.
+//
+// Then:
+// 1. Unless explicitly disabled or the target prefers instructions with the
+//    W suffix, it removes the .w suffix from an op.w instruction whenever
+//    all users depend only on the lower word of the instruction's result.
+//    The cases handled are:
+//    * addi.w, because it helps reduce test differences between LA32 and
+//      LA64 without being a pessimization.
+//
+// 2. Or, if explicitly enabled or the target prefers instructions with the
+//    W suffix, it adds the W suffix to an instruction whenever all users
+//    depend only on the lower word of the instruction's result.
+//    The cases handled are:
+//    * add.d/addi.d/sub.d/mul.d.
+//    * slli.d with imm < 32.
+//    * ld.d/ld.wu.
+//===---------------------------------------------------------------------===//
+
+#include "LoongArch.h"
+#include "LoongArchMachineFunctionInfo.h"
+#include "LoongArchSubtarget.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "loongarch-opt-w-instrs"
+#define LOONGARCH_OPT_W_INSTRS_NAME "LoongArch Optimize W Instructions"
+
+STATISTIC(NumRemovedSExtW, "Number of removed sign-extensions");
+STATISTIC(NumTransformedToWInstrs,
+          "Number of instructions transformed to W-ops");
+
+static cl::opt<bool> DisableSExtWRemoval("loongarch-disable-sextw-removal",
+                                         cl::desc("Disable removal of sext.w"),
+                                         cl::init(false), cl::Hidden);
+static cl::opt<bool> DisableStripWSuffix("loongarch-disable-strip-w-suffix",
+                                         cl::desc("Disable stripping W suffix"),
+                                         cl::init(false), cl::Hidden);
+
+namespace {
+
+class LoongArchOptWInstrs : public MachineFunctionPass {
+public:
+  static char ID;
+
+  LoongArchOptWInstrs() : MachineFunctionPass(ID) {}
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  bool removeSExtWInstrs(MachineFunction &MF, const LoongArchInstrInfo &TII,
+                         const LoongArchSubtarget &ST,
+                         MachineRegisterInfo &MRI);
+  bool stripWSuffixes(MachineFunction &MF, const LoongArchInstrInfo &TII,
+                      const LoongArchSubtarget &ST, MachineRegisterInfo &MRI);
+  bool appendWSuffixes(MachineFunction &MF, const LoongArchInstrInfo &TII,
+                       const LoongArchSubtarget &ST, MachineRegisterInfo &MRI);
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override { return LOONGARCH_OPT_W_INSTRS_NAME; }
+};
+
+} // end anonymous namespace
+
+char LoongArchOptWInstrs::ID = 0;
+INITIALIZE_PASS(LoongArchOptWInstrs, DEBUG_TYPE, LOONGARCH_OPT_W_INSTRS_NAME,
+                false, false)
+
+FunctionPass *llvm::createLoongArchOptWInstrsPass() {
+  return new LoongArchOptWInstrs();
+}
+
+// Checks if all users only demand the lower \p OrigBits of the original
+// instruction's result.
+// TODO: handle multiple interdependent transformations
+static bool hasAllNBitUsers(const MachineInstr &OrigMI,
+                            const LoongArchSubtarget &ST,
+                            const MachineRegisterInfo &MRI, unsigned OrigBits) {
+
+  SmallSet<std::pair<const MachineInstr *, unsigned>, 4> Visited;
+  SmallVector<std::pair<const MachineInstr *, unsigned>, 4> Worklist;
+
+  Worklist.push_back(std::make_pair(&OrigMI, OrigBits));
+
+  while (!Worklist.empty()) {
+    auto P = Worklist.pop_back_val();
+    const MachineInstr *MI = P.first;
+    unsigned Bits = P.second;
+
+    if (!Visited.insert(P).second)
+      continue;
+
+    // Only handle instructions with one def.
+    if (MI->getNumExplicitDefs() != 1)
+      return false;
+
+    Register DestReg = MI->getOperand(0).getReg();
+    if (!DestReg.isVirtual())
+      return false;
+
+    for (auto &UserOp : MRI.use_nodbg_operands(DestReg)) {
+      const MachineInstr *UserMI = UserOp.getParent();
+      unsigned OpIdx = UserOp.getOperandNo();
+
+      switch (UserMI->getOpcode()) {
+      default:
+        // TODO: Add vector
+        return false;
+
+      case LoongArch::ADD_W:
+      case LoongArch::ADDI_W:
+      case LoongArch::SUB_W:
+      case LoongArch::ALSL_W:
+      case LoongArch::ALSL_WU:
+      case LoongArch::MUL_W:
+      case LoongArch::MULH_W:
+      case LoongArch::MULH_WU:
+      case LoongArch::MULW_D_W:
+      case LoongArch::MULW_D_WU:
+      // TODO: {DIV,MOD}.{W,WU} consume the upper 32 bits before LA664+.
+      // case LoongArch::DIV_W:
+      // case LoongArch::DIV_WU:
+      // case LoongArch::MOD_W:
+      // case LoongArch::MOD_WU:
+      case LoongArch::SLL_W:
+      case LoongArch::SLLI_W:
+      case LoongArch::SRL_W:
+      case LoongArch::SRLI_W:
+      case LoongArch::SRA_W:
+      case LoongArch::SRAI_W:
+      case LoongArch::ROTR_W:
+      case LoongArch::ROTRI_W:
+      case LoongArch::CLO_W:
+      case LoongArch::CLZ_W:
+      case LoongArch::CTO_W:
+      case LoongArch::CTZ_W:
+      case LoongArch::BYTEPICK_W:
+      case LoongArch::REVB_2H:
+      case LoongArch::BITREV_4B:
+      case LoongArch::BITREV_W:
+      case LoongArch::BSTRINS_W:
+      case LoongArch::BSTRPICK_W:
+      case LoongArch::CRC_W_W_W:
+      case LoongArch::CRCC_W_W_W:
+      case LoongArch::MOVGR2FCSR:
+      case LoongArch::MOVGR2FRH_W:
+      case LoongArch::MOVGR2FR_W_64:
+        if (Bits >= 32)
+          break;
+        return false;
+      case LoongArch::MOVGR2CF:
+        if (Bits >= 1)
+          break;
+        return false;
+      case LoongArch::EXT_W_B:
+        if (Bits >= 8)
+          break;
+        return false;
+      case LoongArch::EXT_W_H:
+        if (Bits >= 16)
+          break;
+        return false;
+
+      case LoongArch::SRLI_D: {
+        // If we are shifting right by less than Bits, and users don't demand
+        // any bits that were shifted into [Bits-1:0], then we can consider this
+        // as an N-Bit user.
+        unsigned ShAmt = UserMI->getOperand(2).getImm();
+        if (Bits > ShAmt) {
+          Worklist.push_back(std::make_pair(UserMI, Bits - ShAmt));
+          break;
+        }
+        return false;
+      }
+
+      // These overwrite higher input bits; otherwise the lower word of the
+      // output depends only on the lower word of input. So check uses read W.
+      case LoongArch::SLLI_D:
+        if (Bits >= (ST.getGRLen() - UserMI->getOperand(2).getImm()))
+          break;
+        Worklist.push_back(std::make_pair(UserMI, Bits));
+        break;
+      case LoongArch::ANDI: {
+        uint64_t Imm = UserMI->getOperand(2).getImm();
+        if (Bits >= (unsigned)llvm::bit_width(Imm))
+          break;
+        Worklist.push_back(std::make_pair(UserMI, Bits));
+        break;
+      }
+      case LoongArch::ORI: {
+        uint64_t Imm = UserMI->getOperand(2).getImm();
+        if (Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm))
+          break;
+        Worklist.push_back(std::make_pair(UserMI, Bits));
+        break;
+      }
+
+      case LoongArch::SLL_D:
+        // Operand 2 is the shift amount which uses log2(grlen) bits.
+        if (OpIdx == 2) {
+          if (Bits >= Log2_32(ST.getGRLen()))
+            break;
+          return false;
+        }
+        Worklist.push_back(std::make_pair(UserMI, Bits));
+        break;
+
+      case LoongArch::SRA_D:
+      case LoongArch::SRL_D:
+      case LoongArch::ROTR_D:
+        // Operand 2 is the shift amount which uses log2(grlen) bits.
+        if (OpIdx == 2 && Bits >= Log2_32(ST.getGRLen()))
+          break;
+        return false;
+
+      case LoongArch::ST_B:
+      case LoongArch::STX_B:
+      case LoongArch::STGT_B:
+      case LoongArch::STLE_B:
+      case LoongArch::IOCSRWR_B:
+        // The first argument is the value to store.
+        if (OpIdx == 0 && Bits >= 8)
+          break;
+        return false;
+      case LoongArch::ST_H:
+      case LoongArch::STX_H:
+      case LoongArch::STGT_H:
+      case LoongArch::STLE_H:
+      case LoongArch::IOCSRWR_H:
+        // The first argument is the value to store.
+        if (OpIdx == 0 && Bits >= 16)
+          break;
+        return false;
+      case LoongArch::ST_W:
+      case LoongArch::STX_W:
+      case LoongArch::SCREL_W:
+      case LoongArch::STPTR_W:
+      case LoongArch::STGT_W:
+      case LoongArch::STLE_W:
+      case LoongArch::IOCSRWR_W:
+        // The first argument is the value to store.
+        if (OpIdx == 0 && Bits >= 32)
+          break;
+        return false;
+
+      case LoongArch::CRC_W_B_W:
+      case LoongArch::CRCC_W_B_W:
+        if ((OpIdx == 1 && Bits >= 8) || (OpIdx == 2 && Bits >= 32))
+          break;
+        return false;
+      case LoongArch::CRC_W_H_W:
+      case LoongArch::CRCC_W_H_W:
+        if ((OpIdx == 1 && Bits >= 16) || (OpIdx == 2 && Bits >= 32))
+          break;
+        return false;
+      case LoongArch::CRC_W_D_W:
+      case LoongArch::CRCC_W_D_W:
+        if (OpIdx == 2 && Bits >= 32)
+          break;
+        return false;
+
+      // For these, the lower word of the output depends only on the lower
+      // word of the input. So check that all uses read only the lower word.
+      case LoongArch::COPY:
+      case LoongArch::PHI:
+      case LoongArch::ADD_D:
+      case LoongArch::ADDI_D:
+      case LoongArch::SUB_D:
+      case LoongArch::MUL_D:
+      case LoongArch::AND:
+      case LoongArch::OR:
+      case LoongArch::NOR:
+      case LoongArch::XOR:
+      case LoongArch::XORI:
+      case LoongArch::ANDN:
+      case LoongArch::ORN:
+        Worklist.push_back(std::make_pair(UserMI, Bits));
+        break;
+
+      case LoongArch::MASKNEZ:
+      case LoongArch::MASKEQZ:
+        if (OpIdx != 1)
+          return false;
+        Worklist.push_back(std::make_pair(UserMI, Bits));
+        break;
+      }
+    }
+  }
+
+  return true;
+}
+
+static bool hasAllWUsers(const MachineInstr &OrigMI,
+                         const LoongArchSubtarget &ST,
+                         const MachineRegisterInfo &MRI) {
+  return hasAllNBitUsers(OrigMI, ST, MRI, 32);
+}
+
+// This function returns true if the machine instruction always outputs a value
+// where bits 63:32 match bit 31.
+static bool isSignExtendingOpW(const MachineInstr &MI,
+                               const MachineRegisterInfo &MRI, unsigned OpNo) {
+  switch (MI.getOpcode()) {
+  // Normal cases
+  case LoongArch::ADD_W:
+  case LoongArch::SUB_W:
+  case LoongArch::ADDI_W:
+  case LoongArch::ALSL_W:
+  case LoongArch::LU12I_W:
+  case LoongArch::SLT:
+  case LoongArch::SLTU:
+  case LoongArch::SLTI:
+  case LoongArch::SLTUI:
+  case LoongArch::ANDI:
+  case LoongArch::MUL_W:
+  case LoongArch::MULH_W:
+  case LoongArch::MULH_WU:
+  case LoongArch::DIV_W:
+  case LoongArch::MOD_W:
+  case LoongArch::DIV_WU:
+  case LoongArch::MOD_WU:
+  case LoongArch::SLL_W:
+  case LoongArch::SRL_W:
+  case LoongArch::SRA_W:
+  case LoongArch::ROTR_W:
+  case LoongArch::SLLI_W:
+  case LoongArch::SRLI_W:
+  case LoongArch::SRAI_W:
+  case LoongArch::ROTRI_W:
+  case LoongArch::EXT_W_B:
+  case LoongArch::EXT_W_H:
+  case LoongArch::CLO_W:
+  case LoongArch::CLZ_W:
+  case LoongArch::CTO_W:
+  case LoongArch::CTZ_W:
+  case LoongArch::BYTEPICK_W:
+  case LoongArch::REVB_2H:
+  case LoongArch::BITREV_4B:
+  case LoongArch::BITREV_W:
+  case LoongArch::BSTRINS_W:
+  case LoongArch::BSTRPICK_W:
+  case LoongArch::LD_B:
+  case LoongArch::LD_H:
+  case LoongArch::LD_W:
+  case LoongArch::LD_BU:
+  case LoongArch::LD_HU:
+  case LoongArch::LL_W:
+  case LoongArch::LLACQ_W:
+  case LoongArch::RDTIMEL_W:
+  case LoongArch::RDTIMEH_W:
+  case LoongArch::CPUCFG:
+  case LoongArch::LDX_B:
+  case LoongArch::LDX_H:
+  case LoongArch::LDX_W:
+  case LoongArch::LDX_BU:
+  case LoongArch::LDX_HU:
+  case LoongArch::LDPTR_W:
+  case LoongArch::LDGT_B:
+  case LoongArch::LDGT_H:
+  case LoongArch::LDGT_W:
+  case LoongArch::LDLE_B:
+  case LoongArch::LDLE_H:
+  case LoongArch::LDLE_W:
+  case LoongArch::AMSWAP_B:
+  case LoongArch::AMSWAP_H:
+  case LoongArch::AMSWAP_W:
+  case LoongArch::AMADD_B:
+  case LoongArch::AMADD_H:
+  case LoongArch::AMADD_W:
+  case LoongArch::AMAND_W:
+  case LoongArch::AMOR_W:
+  case LoongArch::AMXOR_W:
+  case LoongArch::AMMAX_W:
+  case LoongArch::AMMIN_W:
+  case LoongArch::AMMAX_WU:
+  case LoongArch::AMMIN_WU:
+  case LoongArch::AMSWAP__DB_B:
+  case LoongArch::AMSWAP__DB_H:
+  case LoongArch::AMSWAP__DB_W:
+  case LoongArch::AMADD__DB_B:
+  case LoongArch::AMADD__DB_H:
+  case LoongArch::AMADD__DB_W:
+  case LoongArch::AMAND__DB_W:
+  case LoongArch::AMOR__DB_W:
+  case LoongArch::AMXOR__DB_W:
+  case LoongArch::AMMAX__DB_W:
+  case LoongArch::AMMIN__DB_W:
+  case LoongArch::AMMAX__DB_WU:
+  case LoongArch::AMMIN__DB_WU:
+  case LoongArch::AMCAS_B:
+  case LoongArch::AMCAS_H:
+  case LoongArch::AMCAS_W:
+  case LoongArch::AMCAS__DB_B:
+  case LoongArch::AMCAS__DB_H:
+  case LoongArch::AMCAS__DB_W:
+  case LoongArch::CRC_W_B_W:
+  case LoongArch::CRC_W_H_W:
+  case LoongArch::CRC_W_W_W:
+  case LoongArch::CRC_W_D_W:
+  case LoongArch::CRCC_W_B_W:
+  case LoongArch::CRCC_W_H_W:
+  case LoongArch::CRCC_W_W_W:
+  case LoongArch::CRCC_W_D_W:
+  case LoongArch::IOCSRRD_B:
+  case LoongArch::IOCSRRD_H:
+  case LoongArch::IOCSRRD_W:
+  case LoongArch::MOVFR2GR_S:
+  case LoongArch::MOVFCSR2GR:
+  case LoongArch::MOVCF2GR:
+  case LoongArch::MOVFRH2GR_S:
+  case LoongArch::MOVFR2GR_S_64:
+    // TODO: Add vector
+    return true;
+  // Special cases that require checking operands.
+  // Shifting right sufficiently makes the value 32-bit sign-extended.
+  case LoongArch::SRAI_D:
+    return MI.getOperand(2).getImm() >= 32;
+  case LoongArch::SRLI_D:
+    return MI.getOperand(2).getImm() > 32;
+  // The LI patterns ADDI rd, R0, imm and ORI rd, R0, imm are sign extended.
+  case LoongArch::ADDI_D:
+  case LoongArch::ORI:
+    return MI.getOperand(1).isReg() &&
+           MI.getOperand(1).getReg() == LoongArch::R0;
+  // A bit-field extract is sign extended if the msb is less than 31.
+  case LoongArch::BSTRPICK_D:
+    return MI.getOperand(2).getImm() < 31;
+  // Copying from R0 produces zero.
+  case LoongArch::COPY:
+    return MI.getOperand(1).getReg() == LoongArch::R0;
+  // Ignore the scratch register destination.
+  case LoongArch::PseudoMaskedAtomicSwap32:
+  case LoongArch::PseudoAtomicSwap32:
+  case LoongArch::PseudoMaskedAtomicLoadAdd32:
+  case LoongArch::PseudoMaskedAtomicLoadSub32:
+  case LoongArch::PseudoAtomicLoadNand32:
+  case LoongArch::PseudoMaskedAtomicLoadNand32:
+  case LoongArch::PseudoAtomicLoadAdd32:
+  case LoongArch::PseudoAtomicLoadSub32:
+  case LoongArch::PseudoAtomicLoadAnd32:
+  case LoongArch::PseudoAtomicLoadOr32:
+  case LoongArch::PseudoAtomicLoadXor32:
+  case LoongArch::PseudoMaskedAtomicLoadUMax32:
+  case LoongArch::PseudoMaskedAtomicLoadUMin32:
+  case LoongArch::PseudoCmpXchg32:
+  case LoongArch::PseudoMaskedCmpXchg32:
+  case LoongArch::PseudoMaskedAtomicLoadMax32:
+  case LoongArch::PseudoMaskedAtomicLoadMin32:
+    return OpNo == 0;
+  }
+
+  return false;
+}
+
+static bool isSignExtendedW(Register SrcReg, const LoongArchSubtarget &ST,
+                            const MachineRegisterInfo &MRI,
+                            SmallPtrSetImpl<MachineInstr *> &FixableDef) {
+  SmallSet<Register, 4> Visited;
+  SmallVector<Register, 4> Worklist;
+
+  auto AddRegToWorkList = [&](Register SrcReg) {
+    if (!SrcReg.isVirtual())
+      return false;
+    Worklist.push_back(SrcReg);
+    return true;
+  };
+
+  if (!AddRegToWorkList(SrcReg))
+    return false;
+
+  while (!Worklist.empty()) {
+    Register Reg = Worklist.pop_back_val();
+
+    // If we already visited this register, we don't need to check it again.
+    if (!Visited.insert(Reg).second)
+      continue;
+
+    MachineInstr *MI = MRI.getVRegDef(Reg);
+    if (!MI)
+      continue;
+
+    int OpNo = MI->findRegisterDefOperandIdx(Reg, /*TRI=*/nullptr);
+    assert(OpNo != -1 && "Couldn't find register");
+
+    // If this is a sign extending operation we don't need to look any further.
+    if (isSignExtendingOpW(*MI, MRI, OpNo))
+      continue;
+
+    // Is this an instruction that propagates sign extension?
+    switch (MI->getOpcode()) {
+    default:
+      // Unknown opcode, give up.
+      return false;
+    case LoongArch::COPY: {
+      const MachineFunction *MF = MI->getMF();
+      const LoongArchMachineFunctionInfo *LAFI =
+          MF->getInfo<LoongArchMachineFunctionInfo>();
+
+      // If this is the entry block and the register is livein, see if we know
+      // it is sign extended.
+      if (MI->getParent() == &MF->front()) {
+        Register VReg = MI->getOperand(0).getReg();
+        if (MF->getRegInfo().isLiveIn(VReg) && LAFI->isSExt32Register(VReg))
+          continue;
+      }
+
+      Register CopySrcReg = MI->getOperand(1).getReg();
+      if (CopySrcReg == LoongArch::R4) {
+        // For a method return value, we check the ZExt/SExt return attributes.
+        // We assume the following code sequence for a method call:
+        // PseudoCALL @bar, ...
+        // ADJCALLSTACKUP 0, 0, implicit-def dead $r3, implicit $r3
+        // %0:gpr = COPY $r4
+        //
+        // We use the PseudoCall to look up the IR function being called to find
+        // its return attributes.
+        const MachineBasicBlock *MBB = MI->getParent();
+        auto II = MI->getIterator();
+        if (II == MBB->instr_begin() ||
+            (--II)->getOpcode() != LoongArch::ADJCALLSTACKUP)
+          return false;
+
+        const MachineInstr &CallMI = *(--II);
+        if (!CallMI.isCall() || !CallMI.getOperand(0).isGlobal())
+          return false;
+
+        auto *CalleeFn =
+            dyn_cast_if_present<Function>(CallMI.getOperand(0).getGlobal());
+        if (!CalleeFn)
+          return false;
+
+        auto *IntTy = dyn_cast<IntegerType>(CalleeFn->getReturnType());
+        if (!IntTy)
+          return false;
+
+        const AttributeSet &Attrs = CalleeFn->getAttributes().getRetAttrs();
+        unsigned BitWidth = IntTy->getBitWidth();
+        if ((BitWidth <= 32 && Attrs.hasAttribute(Attribute::SExt)) ||
+            (BitWidth < 32 && Attrs.hasAttribute(Attribute::ZExt)))
+          continue;
+      }
+
+      if (!AddRegToWorkList(CopySrcReg))
+        return false;
+
+      break;
+    }
+
+    // For these, we just need to check if the 1st operand is sign extended.
+    case LoongArch::MOD_D:
+    case LoongArch::ANDI:
+    case LoongArch::ORI:
+    case LoongArch::XORI:
+      // |Remainder| is always <= |Dividend|. If D is 32-bit, then so is R.
+      // DIV doesn't work because of the edge case 0xf..f 8000 0000 / (long)-1
+      // Logical operations use a zero-extended 12-bit immediate.
+      if (!AddRegToWorkList(MI->getOperand(1).getReg()))
+        return false;
+
+      break;
+    case LoongArch::MOD_DU:
+    case LoongArch::AND:
+    case LoongArch::OR:
+    case LoongArch::XOR:
+    case LoongArch::ANDN:
+    case LoongArch::ORN:
+    case LoongArch::PHI: {
+      // If all incoming values are sign-extended, the output of AND, OR, XOR,
+      // or PHI is also sign-extended.
+
+      // The input registers for PHI are operand 1, 3, ...
+      // The input registers for others are operand 1 and 2.
+      unsigned B = 1, E = 3, D = 1;
+      switch (MI->getOpcode()) {
+      case LoongArch::PHI:
+        E = MI->getNumOperands();
+        D = 2;
+        break;
+      }
+
+      for (unsigned I = B; I != E; I += D) {
+        if (!MI->getOperand(I).isReg())
+          return false;
+
+        if (!AddRegToWorkList(MI->getOperand(I).getReg()))
+          return false;
+      }
+
+      break;
+    }
+
+    case LoongArch::MASKEQZ:
+    case LoongArch::MASKNEZ:
+      // These instructions return zero or operand 1. The result is sign
+      // extended if operand 1 is sign extended.
+      if (!AddRegToWorkList(MI->getOperand(1).getReg()))
+        return false;
+      break;
+
+    // With these opcodes, we can "fix" them with the W-version
+    // if we know all users of the result only rely on bits 31:0.
+    case LoongArch::SLLI_D:
+      // SLLI_W reads the lowest 5 bits, while SLLI_D reads the lowest 6 bits.
+      if (MI->getOperand(2).getImm() >= 32)
+        return false;
+      [[fallthrough]];
+    case LoongArch::ADDI_D:
+    case LoongArch::ADD_D:
+    case LoongArch::LD_D:
+    case LoongArch::LD_WU:
+    case LoongArch::MUL_D:
+    case LoongArch::SUB_D:
+      if (hasAllWUsers(*MI, ST, MRI)) {
+        FixableDef.insert(MI);
+        break;
+      }
+      return false;
+    }
+  }
+
+  // If we get here, then every node we visited produces a sign extended value
+  // or propagates sign extended values. So the result must be sign extended.
+  return true;
+}
+
+static unsigned getWOp(unsigned Opcode) {
+  switch (Opcode) {
+  case LoongArch::ADDI_D:
+    return LoongArch::ADDI_W;
+  case LoongArch::ADD_D:
+    return LoongArch::ADD_W;
+  case LoongArch::LD_D:
+  case LoongArch::LD_WU:
+    return LoongArch::LD_W;
+  case LoongArch::MUL_D:
+    return LoongArch::MUL_W;
+  case LoongArch::SLLI_D:
+    return LoongArch::SLLI_W;
+  case LoongArch::SUB_D:
+    return LoongArch::SUB_W;
+  default:
+    llvm_unreachable("Unexpected opcode for replacement with W variant");
+  }
+}
+
+bool LoongArchOptWInstrs::removeSExtWInstrs(MachineFunction &MF,
+                                            const LoongArchInstrInfo &TII,
+                                            const LoongArchSubtarget &ST,
+                                            MachineRegisterInfo &MRI) {
+  if (DisableSExtWRemoval)
+    return false;
+
+  bool MadeChange = false;
+  for (MachineBasicBlock &MBB : MF) {
+    for (MachineInstr &MI : llvm::make_early_inc_range(MBB)) {
+      // We're looking for the sext.w pattern ADDI.W rd, rs, 0.
+      if (!LoongArch::isSEXT_W(MI))
+        continue;
+
+      Register SrcReg = MI.getOperand(1).getReg();
+
+      SmallPtrSet<MachineInstr *, 4> FixableDefs;
+
+      // If all users only use the lower bits, this sext.w is redundant. Or,
+      // if all definitions reaching MI sign-extend their output, then the
+      // sext.w is redundant.
+      if (!hasAllWUsers(MI, ST, MRI) &&
+          !isSignExtendedW(SrcReg, ST, MRI, FixableDefs))
+        continue;
+
+      Register DstReg = MI.getOperand(0).getReg();
+      if (!MRI.constrainRegClass(SrcReg, MRI.getRegClass(DstReg)))
+        continue;
+
+      // Convert Fixable instructions to their W versions.
+      for (MachineInstr *Fixable : FixableDefs) {
+        LLVM_DEBUG(dbgs() << "Replacing " << *Fixable);
+        Fixable->setDesc(TII.get(getWOp(Fixable->getOpcode())));
+        Fixable->clearFlag(MachineInstr::MIFlag::NoSWrap);
+        Fixable->clearFlag(MachineInstr::MIFlag::NoUWrap);
+        Fixable->clearFlag(MachineInstr::MIFlag::IsExact);
+        LLVM_DEBUG(dbgs() << "     with " << *Fixable);
+        ++NumTransformedToWInstrs;
+      }
+
+      LLVM_DEBUG(dbgs() << "Removing redundant sign-extension\n");
+      MRI.replaceRegWith(DstReg, SrcReg);
+      MRI.clearKillFlags(SrcReg);
+      MI.eraseFromParent();
+      ++NumRemovedSExtW;
+      MadeChange = true;
+    }
+  }
+
+  return MadeChange;
+}
+
+bool LoongArchOptWInstrs::stripWSuffixes(MachineFunction &MF,
+                                         const LoongArchInstrInfo &TII,
+                                         const LoongArchSubtarget &ST,
+                                         MachineRegisterInfo &MRI) {
+  bool MadeChange = false;
+  for (MachineBasicBlock &MBB : MF) {
+    for (MachineInstr &MI : MBB) {
+      unsigned Opc;
+      switch (MI.getOpcode()) {
+      default:
+        continue;
+      case LoongArch::ADDI_W:
+        Opc = LoongArch::ADDI_D;
+        break;
+      }
+
+      if (hasAllWUsers(MI, ST, MRI)) {
+        MI.setDesc(TII.get(Opc));
+        MadeChange = true;
+      }
+    }
+  }
+
+  return MadeChange;
+}
+
+bool LoongArchOptWInstrs::appendWSuffixes(MachineFunction &MF,
+                                          const LoongArchInstrInfo &TII,
+                                          const LoongArchSubtarget &ST,
+                                          MachineRegisterInfo &MRI) {
+  bool MadeChange = false;
+  for (MachineBasicBlock &MBB : MF) {
+    for (MachineInstr &MI : MBB) {
+      unsigned WOpc;
+      // TODO: Add more?
+      switch (MI.getOpcode()) {
+      default:
+        continue;
+      case LoongArch::ADD_D:
+        WOpc = LoongArch::ADD_W;
+        break;
+      case LoongArch::ADDI_D:
+        WOpc = LoongArch::ADDI_W;
+        break;
+      case LoongArch::SUB_D:
+        WOpc = LoongArch::SUB_W;
+        break;
+      case LoongArch::MUL_D:
+        WOpc = LoongArch::MUL_W;
+        break;
+      case LoongArch::SLLI_D:
+        // SLLI.W reads the lowest 5 bits, while SLLI.D reads the lowest 6 bits.
+        if (MI.getOperand(2).getImm() >= 32)
+          continue;
+        WOpc = LoongArch::SLLI_W;
+        break;
+      case LoongArch::LD_D:
+      case LoongArch::LD_WU:
+        WOpc = LoongArch::LD_W;
+        break;
+      }
+
+      if (hasAllWUsers(MI, ST, MRI)) {
+        LLVM_DEBUG(dbgs() << "Replacing " << MI);
+        MI.setDesc(TII.get(WOpc));
+        MI.clearFlag(MachineInstr::MIFlag::NoSWrap);
+        MI.clearFlag(MachineInstr::MIFlag::NoUWrap);
+        MI.clearFlag(MachineInstr::MIFlag::IsExact);
+        LLVM_DEBUG(dbgs() << "     with " << MI);
+        ++NumTransformedToWInstrs;
+        MadeChange = true;
+      }
+    }
+  }
+
+  return MadeChange;
+}
+
+bool LoongArchOptWInstrs::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(MF.getFunction()))
+    return false;
+
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const LoongArchSubtarget &ST = MF.getSubtarget<LoongArchSubtarget>();
+  const LoongArchInstrInfo &TII = *ST.getInstrInfo();
+
+  if (!ST.is64Bit())
+    return false;
+
+  bool MadeChange = false;
+  MadeChange |= removeSExtWInstrs(MF, TII, ST, MRI);
+
+  if (!(DisableStripWSuffix || ST.preferWInst()))
+    MadeChange |= stripWSuffixes(MF, TII, ST, MRI);
+
+  if (ST.preferWInst())
+    MadeChange |= appendWSuffixes(MF, TII, ST, MRI);
+
+  return MadeChange;
+}
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
index e5494488e11357..2b2d4e478cc821 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
@@ -34,6 +34,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeLoongArchTarget() {
   RegisterTargetMachine<LoongArchTargetMachine> X(getTheLoongArch32Target());
   RegisterTargetMachine<LoongArchTargetMachine> Y(getTheLoongArch64Target());
   auto *PR = PassRegistry::getPassRegistry();
+  initializeLoongArchOptWInstrsPass(*PR);
   initializeLoongArchPreRAExpandPseudoPass(*PR);
   initializeLoongArchDAGToDAGISelPass(*PR);
 }
@@ -145,6 +146,7 @@ class LoongArchPassConfig : public TargetPassConfig {
   bool addInstSelector() override;
   void addPreEmitPass() override;
   void addPreEmitPass2() override;
+  void addMachineSSAOptimization() override;
   void addPreRegAlloc() override;
 };
 } // end namespace
@@ -187,6 +189,14 @@ void LoongArchPassConfig::addPreEmitPass2() {
   addPass(createLoongArchExpandAtomicPseudoPass());
 }
 
+void LoongArchPassConfig::addMachineSSAOptimization() {
+  TargetPassConfig::addMachineSSAOptimization();
+
+  if (TM->getTargetTriple().isLoongArch64()) {
+    addPass(createLoongArchOptWInstrsPass());
+  }
+}
+
 void LoongArchPassConfig::addPreRegAlloc() {
   addPass(createLoongArchPreRAExpandPseudoPass());
 }
diff --git a/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
index 7cde034726e0b5..b95c2e24737a50 100644
--- a/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/LoongArch/atomicrmw-uinc-udec-wrap.ll
@@ -4,37 +4,37 @@
 define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
 ; LA64-LABEL: atomicrmw_uinc_wrap_i8:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    slli.d $a4, $a0, 3
+; LA64-NEXT:    slli.d $a3, $a0, 3
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:    andi $a2, $a4, 24
+; LA64-NEXT:    andi $a2, $a3, 24
 ; LA64-NEXT:    ori $a5, $zero, 255
-; LA64-NEXT:    ld.w $a3, $a0, 0
-; LA64-NEXT:    sll.w $a4, $a5, $a4
-; LA64-NEXT:    nor $a4, $a4, $zero
+; LA64-NEXT:    ld.w $a4, $a0, 0
+; LA64-NEXT:    sll.w $a3, $a5, $a3
+; LA64-NEXT:    nor $a3, $a3, $zero
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB0_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB0_3 Depth 2
-; LA64-NEXT:    srl.w $a5, $a3, $a2
-; LA64-NEXT:    addi.w $a6, $a3, 0
-; LA64-NEXT:    andi $a7, $a5, 255
-; LA64-NEXT:    addi.d $a5, $a5, 1
-; LA64-NEXT:    sltu $a7, $a7, $a1
-; LA64-NEXT:    xori $a7, $a7, 1
-; LA64-NEXT:    masknez $a5, $a5, $a7
-; LA64-NEXT:    andi $a5, $a5, 255
-; LA64-NEXT:    sll.w $a5, $a5, $a2
-; LA64-NEXT:    and $a3, $a3, $a4
-; LA64-NEXT:    or $a5, $a3, $a5
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    srl.w $a4, $a4, $a2
+; LA64-NEXT:    andi $a6, $a4, 255
+; LA64-NEXT:    addi.d $a4, $a4, 1
+; LA64-NEXT:    sltu $a6, $a6, $a1
+; LA64-NEXT:    xori $a6, $a6, 1
+; LA64-NEXT:    masknez $a4, $a4, $a6
+; LA64-NEXT:    andi $a4, $a4, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    and $a6, $a5, $a3
+; LA64-NEXT:    or $a6, $a6, $a4
 ; LA64-NEXT:  .LBB0_3: # %atomicrmw.start
 ; LA64-NEXT:    # Parent Loop BB0_1 Depth=1
 ; LA64-NEXT:    # => This Inner Loop Header: Depth=2
-; LA64-NEXT:    ll.w $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a6, .LBB0_5
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    bne $a4, $a5, .LBB0_5
 ; LA64-NEXT:  # %bb.4: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB0_3 Depth=2
-; LA64-NEXT:    move $a7, $a5
+; LA64-NEXT:    move $a7, $a6
 ; LA64-NEXT:    sc.w $a7, $a0, 0
 ; LA64-NEXT:    beqz $a7, .LBB0_3
 ; LA64-NEXT:    b .LBB0_6
@@ -43,9 +43,9 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB0_6: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB0_1 Depth=1
-; LA64-NEXT:    bne $a3, $a6, .LBB0_1
+; LA64-NEXT:    bne $a4, $a5, .LBB0_1
 ; LA64-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    srl.w $a0, $a4, $a2
 ; LA64-NEXT:    ret
   %result = atomicrmw uinc_wrap ptr %ptr, i8 %val seq_cst
   ret i8 %result
@@ -54,38 +54,38 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
 define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
 ; LA64-LABEL: atomicrmw_uinc_wrap_i16:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    slli.d $a4, $a0, 3
+; LA64-NEXT:    slli.d $a3, $a0, 3
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:    andi $a2, $a4, 24
-; LA64-NEXT:    lu12i.w $a3, 15
-; LA64-NEXT:    ori $a5, $a3, 4095
-; LA64-NEXT:    ld.w $a3, $a0, 0
-; LA64-NEXT:    sll.w $a4, $a5, $a4
-; LA64-NEXT:    nor $a4, $a4, $zero
+; LA64-NEXT:    andi $a2, $a3, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a5, $a4, 4095
+; LA64-NEXT:    ld.w $a4, $a0, 0
+; LA64-NEXT:    sll.w $a3, $a5, $a3
+; LA64-NEXT:    nor $a3, $a3, $zero
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB1_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB1_3 Depth 2
-; LA64-NEXT:    srl.w $a5, $a3, $a2
-; LA64-NEXT:    addi.w $a6, $a3, 0
-; LA64-NEXT:    bstrpick.d $a7, $a5, 15, 0
-; LA64-NEXT:    addi.d $a5, $a5, 1
-; LA64-NEXT:    sltu $a7, $a7, $a1
-; LA64-NEXT:    xori $a7, $a7, 1
-; LA64-NEXT:    masknez $a5, $a5, $a7
-; LA64-NEXT:    bstrpick.d $a5, $a5, 15, 0
-; LA64-NEXT:    sll.w $a5, $a5, $a2
-; LA64-NEXT:    and $a3, $a3, $a4
-; LA64-NEXT:    or $a5, $a3, $a5
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    srl.w $a4, $a4, $a2
+; LA64-NEXT:    bstrpick.d $a6, $a4, 15, 0
+; LA64-NEXT:    addi.d $a4, $a4, 1
+; LA64-NEXT:    sltu $a6, $a6, $a1
+; LA64-NEXT:    xori $a6, $a6, 1
+; LA64-NEXT:    masknez $a4, $a4, $a6
+; LA64-NEXT:    bstrpick.d $a4, $a4, 15, 0
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    and $a6, $a5, $a3
+; LA64-NEXT:    or $a6, $a6, $a4
 ; LA64-NEXT:  .LBB1_3: # %atomicrmw.start
 ; LA64-NEXT:    # Parent Loop BB1_1 Depth=1
 ; LA64-NEXT:    # => This Inner Loop Header: Depth=2
-; LA64-NEXT:    ll.w $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a6, .LBB1_5
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    bne $a4, $a5, .LBB1_5
 ; LA64-NEXT:  # %bb.4: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB1_3 Depth=2
-; LA64-NEXT:    move $a7, $a5
+; LA64-NEXT:    move $a7, $a6
 ; LA64-NEXT:    sc.w $a7, $a0, 0
 ; LA64-NEXT:    beqz $a7, .LBB1_3
 ; LA64-NEXT:    b .LBB1_6
@@ -94,9 +94,9 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB1_6: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB1_1 Depth=1
-; LA64-NEXT:    bne $a3, $a6, .LBB1_1
+; LA64-NEXT:    bne $a4, $a5, .LBB1_1
 ; LA64-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    srl.w $a0, $a4, $a2
 ; LA64-NEXT:    ret
   %result = atomicrmw uinc_wrap ptr %ptr, i16 %val seq_cst
   ret i16 %result
@@ -111,19 +111,19 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
 ; LA64-NEXT:  .LBB2_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB2_3 Depth 2
-; LA64-NEXT:    addi.d $a3, $a2, 1
-; LA64-NEXT:    addi.w $a4, $a2, 0
-; LA64-NEXT:    sltu $a2, $a4, $a1
-; LA64-NEXT:    xori $a2, $a2, 1
-; LA64-NEXT:    masknez $a3, $a3, $a2
+; LA64-NEXT:    move $a3, $a2
+; LA64-NEXT:    addi.d $a2, $a2, 1
+; LA64-NEXT:    sltu $a4, $a3, $a1
+; LA64-NEXT:    xori $a4, $a4, 1
+; LA64-NEXT:    masknez $a4, $a2, $a4
 ; LA64-NEXT:  .LBB2_3: # %atomicrmw.start
 ; LA64-NEXT:    # Parent Loop BB2_1 Depth=1
 ; LA64-NEXT:    # => This Inner Loop Header: Depth=2
 ; LA64-NEXT:    ll.w $a2, $a0, 0
-; LA64-NEXT:    bne $a2, $a4, .LBB2_5
+; LA64-NEXT:    bne $a2, $a3, .LBB2_5
 ; LA64-NEXT:  # %bb.4: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB2_3 Depth=2
-; LA64-NEXT:    move $a5, $a3
+; LA64-NEXT:    move $a5, $a4
 ; LA64-NEXT:    sc.w $a5, $a0, 0
 ; LA64-NEXT:    beqz $a5, .LBB2_3
 ; LA64-NEXT:    b .LBB2_6
@@ -132,7 +132,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB2_6: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB2_1 Depth=1
-; LA64-NEXT:    bne $a2, $a4, .LBB2_1
+; LA64-NEXT:    bne $a2, $a3, .LBB2_1
 ; LA64-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-NEXT:    move $a0, $a2
 ; LA64-NEXT:    ret
@@ -180,42 +180,42 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
 define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
 ; LA64-LABEL: atomicrmw_udec_wrap_i8:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    slli.d $a4, $a0, 3
+; LA64-NEXT:    slli.d $a3, $a0, 3
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:    andi $a2, $a4, 24
-; LA64-NEXT:    ori $a5, $zero, 255
-; LA64-NEXT:    ld.w $a3, $a0, 0
-; LA64-NEXT:    sll.w $a4, $a5, $a4
-; LA64-NEXT:    nor $a4, $a4, $zero
-; LA64-NEXT:    andi $a5, $a1, 255
+; LA64-NEXT:    andi $a2, $a3, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    ld.w $a5, $a0, 0
+; LA64-NEXT:    sll.w $a3, $a4, $a3
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    andi $a4, $a1, 255
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB4_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB4_3 Depth 2
-; LA64-NEXT:    srl.w $a6, $a3, $a2
-; LA64-NEXT:    addi.w $a7, $a3, 0
-; LA64-NEXT:    andi $t0, $a6, 255
-; LA64-NEXT:    addi.d $a6, $a6, -1
-; LA64-NEXT:    sltui $t1, $t0, 1
-; LA64-NEXT:    sltu $t0, $a5, $t0
-; LA64-NEXT:    masknez $a6, $a6, $t0
-; LA64-NEXT:    maskeqz $t0, $a1, $t0
-; LA64-NEXT:    or $a6, $t0, $a6
-; LA64-NEXT:    masknez $a6, $a6, $t1
-; LA64-NEXT:    maskeqz $t0, $a1, $t1
-; LA64-NEXT:    or $a6, $t0, $a6
-; LA64-NEXT:    andi $a6, $a6, 255
-; LA64-NEXT:    sll.w $a6, $a6, $a2
-; LA64-NEXT:    and $a3, $a3, $a4
-; LA64-NEXT:    or $a6, $a3, $a6
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    srl.w $a5, $a5, $a2
+; LA64-NEXT:    andi $a7, $a5, 255
+; LA64-NEXT:    addi.d $a5, $a5, -1
+; LA64-NEXT:    sltui $t0, $a7, 1
+; LA64-NEXT:    sltu $a7, $a4, $a7
+; LA64-NEXT:    masknez $a5, $a5, $a7
+; LA64-NEXT:    maskeqz $a7, $a1, $a7
+; LA64-NEXT:    or $a5, $a7, $a5
+; LA64-NEXT:    masknez $a5, $a5, $t0
+; LA64-NEXT:    maskeqz $a7, $a1, $t0
+; LA64-NEXT:    or $a5, $a7, $a5
+; LA64-NEXT:    andi $a5, $a5, 255
+; LA64-NEXT:    sll.w $a5, $a5, $a2
+; LA64-NEXT:    and $a7, $a6, $a3
+; LA64-NEXT:    or $a7, $a7, $a5
 ; LA64-NEXT:  .LBB4_3: # %atomicrmw.start
 ; LA64-NEXT:    # Parent Loop BB4_1 Depth=1
 ; LA64-NEXT:    # => This Inner Loop Header: Depth=2
-; LA64-NEXT:    ll.w $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a7, .LBB4_5
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    bne $a5, $a6, .LBB4_5
 ; LA64-NEXT:  # %bb.4: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB4_3 Depth=2
-; LA64-NEXT:    move $t0, $a6
+; LA64-NEXT:    move $t0, $a7
 ; LA64-NEXT:    sc.w $t0, $a0, 0
 ; LA64-NEXT:    beqz $t0, .LBB4_3
 ; LA64-NEXT:    b .LBB4_6
@@ -224,9 +224,9 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB4_6: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB4_1 Depth=1
-; LA64-NEXT:    bne $a3, $a7, .LBB4_1
+; LA64-NEXT:    bne $a5, $a6, .LBB4_1
 ; LA64-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    srl.w $a0, $a5, $a2
 ; LA64-NEXT:    ret
   %result = atomicrmw udec_wrap ptr %ptr, i8 %val seq_cst
   ret i8 %result
@@ -235,43 +235,43 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
 define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
 ; LA64-LABEL: atomicrmw_udec_wrap_i16:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    slli.d $a4, $a0, 3
+; LA64-NEXT:    slli.d $a3, $a0, 3
 ; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
-; LA64-NEXT:    andi $a2, $a4, 24
-; LA64-NEXT:    lu12i.w $a3, 15
-; LA64-NEXT:    ori $a5, $a3, 4095
-; LA64-NEXT:    ld.w $a3, $a0, 0
-; LA64-NEXT:    sll.w $a4, $a5, $a4
-; LA64-NEXT:    nor $a4, $a4, $zero
-; LA64-NEXT:    bstrpick.d $a5, $a1, 15, 0
+; LA64-NEXT:    andi $a2, $a3, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    ld.w $a5, $a0, 0
+; LA64-NEXT:    sll.w $a3, $a4, $a3
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    bstrpick.d $a4, $a1, 15, 0
 ; LA64-NEXT:    .p2align 4, , 16
 ; LA64-NEXT:  .LBB5_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB5_3 Depth 2
-; LA64-NEXT:    srl.w $a6, $a3, $a2
-; LA64-NEXT:    addi.w $a7, $a3, 0
-; LA64-NEXT:    bstrpick.d $t0, $a6, 15, 0
-; LA64-NEXT:    addi.d $a6, $a6, -1
-; LA64-NEXT:    sltui $t1, $t0, 1
-; LA64-NEXT:    sltu $t0, $a5, $t0
-; LA64-NEXT:    masknez $a6, $a6, $t0
-; LA64-NEXT:    maskeqz $t0, $a1, $t0
-; LA64-NEXT:    or $a6, $t0, $a6
-; LA64-NEXT:    masknez $a6, $a6, $t1
-; LA64-NEXT:    maskeqz $t0, $a1, $t1
-; LA64-NEXT:    or $a6, $t0, $a6
-; LA64-NEXT:    bstrpick.d $a6, $a6, 15, 0
-; LA64-NEXT:    sll.w $a6, $a6, $a2
-; LA64-NEXT:    and $a3, $a3, $a4
-; LA64-NEXT:    or $a6, $a3, $a6
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    srl.w $a5, $a5, $a2
+; LA64-NEXT:    bstrpick.d $a7, $a5, 15, 0
+; LA64-NEXT:    addi.d $a5, $a5, -1
+; LA64-NEXT:    sltui $t0, $a7, 1
+; LA64-NEXT:    sltu $a7, $a4, $a7
+; LA64-NEXT:    masknez $a5, $a5, $a7
+; LA64-NEXT:    maskeqz $a7, $a1, $a7
+; LA64-NEXT:    or $a5, $a7, $a5
+; LA64-NEXT:    masknez $a5, $a5, $t0
+; LA64-NEXT:    maskeqz $a7, $a1, $t0
+; LA64-NEXT:    or $a5, $a7, $a5
+; LA64-NEXT:    bstrpick.d $a5, $a5, 15, 0
+; LA64-NEXT:    sll.w $a5, $a5, $a2
+; LA64-NEXT:    and $a7, $a6, $a3
+; LA64-NEXT:    or $a7, $a7, $a5
 ; LA64-NEXT:  .LBB5_3: # %atomicrmw.start
 ; LA64-NEXT:    # Parent Loop BB5_1 Depth=1
 ; LA64-NEXT:    # => This Inner Loop Header: Depth=2
-; LA64-NEXT:    ll.w $a3, $a0, 0
-; LA64-NEXT:    bne $a3, $a7, .LBB5_5
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    bne $a5, $a6, .LBB5_5
 ; LA64-NEXT:  # %bb.4: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB5_3 Depth=2
-; LA64-NEXT:    move $t0, $a6
+; LA64-NEXT:    move $t0, $a7
 ; LA64-NEXT:    sc.w $t0, $a0, 0
 ; LA64-NEXT:    beqz $t0, .LBB5_3
 ; LA64-NEXT:    b .LBB5_6
@@ -280,9 +280,9 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB5_6: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB5_1 Depth=1
-; LA64-NEXT:    bne $a3, $a7, .LBB5_1
+; LA64-NEXT:    bne $a5, $a6, .LBB5_1
 ; LA64-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    srl.w $a0, $a5, $a2
 ; LA64-NEXT:    ret
   %result = atomicrmw udec_wrap ptr %ptr, i16 %val seq_cst
   ret i16 %result
@@ -297,24 +297,24 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
 ; LA64-NEXT:  .LBB6_1: # %atomicrmw.start
 ; LA64-NEXT:    # =>This Loop Header: Depth=1
 ; LA64-NEXT:    # Child Loop BB6_3 Depth 2
-; LA64-NEXT:    addi.d $a4, $a2, -1
-; LA64-NEXT:    addi.w $a5, $a2, 0
-; LA64-NEXT:    sltui $a2, $a5, 1
-; LA64-NEXT:    sltu $a6, $a3, $a5
-; LA64-NEXT:    masknez $a4, $a4, $a6
+; LA64-NEXT:    move $a4, $a2
+; LA64-NEXT:    addi.d $a2, $a2, -1
+; LA64-NEXT:    sltui $a5, $a4, 1
+; LA64-NEXT:    sltu $a6, $a3, $a4
+; LA64-NEXT:    masknez $a2, $a2, $a6
 ; LA64-NEXT:    maskeqz $a6, $a1, $a6
-; LA64-NEXT:    or $a4, $a6, $a4
-; LA64-NEXT:    masknez $a4, $a4, $a2
-; LA64-NEXT:    maskeqz $a2, $a1, $a2
-; LA64-NEXT:    or $a4, $a2, $a4
+; LA64-NEXT:    or $a2, $a6, $a2
+; LA64-NEXT:    masknez $a2, $a2, $a5
+; LA64-NEXT:    maskeqz $a5, $a1, $a5
+; LA64-NEXT:    or $a5, $a5, $a2
 ; LA64-NEXT:  .LBB6_3: # %atomicrmw.start
 ; LA64-NEXT:    # Parent Loop BB6_1 Depth=1
 ; LA64-NEXT:    # => This Inner Loop Header: Depth=2
 ; LA64-NEXT:    ll.w $a2, $a0, 0
-; LA64-NEXT:    bne $a2, $a5, .LBB6_5
+; LA64-NEXT:    bne $a2, $a4, .LBB6_5
 ; LA64-NEXT:  # %bb.4: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB6_3 Depth=2
-; LA64-NEXT:    move $a6, $a4
+; LA64-NEXT:    move $a6, $a5
 ; LA64-NEXT:    sc.w $a6, $a0, 0
 ; LA64-NEXT:    beqz $a6, .LBB6_3
 ; LA64-NEXT:    b .LBB6_6
@@ -323,7 +323,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB6_6: # %atomicrmw.start
 ; LA64-NEXT:    # in Loop: Header=BB6_1 Depth=1
-; LA64-NEXT:    bne $a2, $a5, .LBB6_1
+; LA64-NEXT:    bne $a2, $a4, .LBB6_1
 ; LA64-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-NEXT:    move $a0, $a2
 ; LA64-NEXT:    ret
diff --git a/llvm/test/CodeGen/LoongArch/gep-imm.ll b/llvm/test/CodeGen/LoongArch/gep-imm.ll
index 0eef7e4517f3d8..c88d0b5a4543f6 100644
--- a/llvm/test/CodeGen/LoongArch/gep-imm.ll
+++ b/llvm/test/CodeGen/LoongArch/gep-imm.ll
@@ -7,19 +7,17 @@ define void @test(ptr %sp, ptr %t, i32 %n) {
 ; CHECK-NEXT:    ld.d $a0, $a0, 0
 ; CHECK-NEXT:    move $a3, $zero
 ; CHECK-NEXT:    addi.w $a2, $a2, 0
-; CHECK-NEXT:    addi.w $a4, $a3, 0
-; CHECK-NEXT:    bge $a4, $a2, .LBB0_2
+; CHECK-NEXT:    bge $a3, $a2, .LBB0_2
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB0_1: # %while_body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.d $a4, $a3, 1
+; CHECK-NEXT:    addi.w $a4, $a3, 1
 ; CHECK-NEXT:    stptr.w $a4, $a0, 8000
 ; CHECK-NEXT:    stptr.w $a3, $a0, 8004
 ; CHECK-NEXT:    stptr.w $a4, $a1, 8000
 ; CHECK-NEXT:    stptr.w $a3, $a1, 8004
 ; CHECK-NEXT:    move $a3, $a4
-; CHECK-NEXT:    addi.w $a4, $a3, 0
-; CHECK-NEXT:    blt $a4, $a2, .LBB0_1
+; CHECK-NEXT:    blt $a3, $a2, .LBB0_1
 ; CHECK-NEXT:  .LBB0_2: # %while_end
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
index 2c504efca26d13..709e0faeff90ef 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
@@ -713,7 +713,7 @@ define signext i32 @add_i32_4080(i32 %x) {
 ;
 ; LA64-LABEL: add_i32_4080:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    addi.w $a0, $a0, 2047
+; LA64-NEXT:    addi.d $a0, $a0, 2047
 ; LA64-NEXT:    addi.w $a0, $a0, 2033
 ; LA64-NEXT:    ret
   %add = add i32 %x, 4080
@@ -729,7 +729,7 @@ define signext i32 @add_i32_minus_4080(i32 %x) {
 ;
 ; LA64-LABEL: add_i32_minus_4080:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    addi.w $a0, $a0, -2048
+; LA64-NEXT:    addi.d $a0, $a0, -2048
 ; LA64-NEXT:    addi.w $a0, $a0, -2032
 ; LA64-NEXT:    ret
   %add = add i32 %x, -4080
@@ -745,7 +745,7 @@ define signext i32 @add_i32_2048(i32 %x) {
 ;
 ; LA64-LABEL: add_i32_2048:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    addi.w $a0, $a0, 2047
+; LA64-NEXT:    addi.d $a0, $a0, 2047
 ; LA64-NEXT:    addi.w $a0, $a0, 1
 ; LA64-NEXT:    ret
   %add = add i32 %x, 2048
@@ -761,7 +761,7 @@ define signext i32 @add_i32_4094(i32 %x) {
 ;
 ; LA64-LABEL: add_i32_4094:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    addi.w $a0, $a0, 2047
+; LA64-NEXT:    addi.d $a0, $a0, 2047
 ; LA64-NEXT:    addi.w $a0, $a0, 2047
 ; LA64-NEXT:    ret
   %add = add i32 %x, 4094
@@ -777,7 +777,7 @@ define signext i32 @add_i32_minus_2049(i32 %x) {
 ;
 ; LA64-LABEL: add_i32_minus_2049:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    addi.w $a0, $a0, -2048
+; LA64-NEXT:    addi.d $a0, $a0, -2048
 ; LA64-NEXT:    addi.w $a0, $a0, -1
 ; LA64-NEXT:    ret
   %add = add i32 %x, -2049
@@ -793,7 +793,7 @@ define signext i32 @add_i32_minus_4096(i32 %x) {
 ;
 ; LA64-LABEL: add_i32_minus_4096:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    addi.w $a0, $a0, -2048
+; LA64-NEXT:    addi.d $a0, $a0, -2048
 ; LA64-NEXT:    addi.w $a0, $a0, -2048
 ; LA64-NEXT:    ret
   %add = add i32 %x, -4096
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
index 06ad89972b849b..495974a59ba67d 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
@@ -12,15 +12,12 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    andi $a2, $a2, 255
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a4, $a0, 0
-; LA64-NEXT:    and $a5, $a4, $a3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
 ; LA64-NEXT:    bne $a5, $a1, .LBB0_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB0_1 Depth=1
-; LA64-NEXT:    andn $a5, $a4, $a3
+; LA64-NEXT:    andn $a5, $a3, $a4
 ; LA64-NEXT:    or $a5, $a5, $a2
 ; LA64-NEXT:    sc.w $a5, $a0, 0
 ; LA64-NEXT:    beqz $a5, .LBB0_1
@@ -45,15 +42,12 @@ define void @cmpxchg_i16_acquire_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    bstrpick.d $a2, $a2, 15, 0
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a4, $a0, 0
-; LA64-NEXT:    and $a5, $a4, $a3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
 ; LA64-NEXT:    bne $a5, $a1, .LBB1_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB1_1 Depth=1
-; LA64-NEXT:    andn $a5, $a4, $a3
+; LA64-NEXT:    andn $a5, $a3, $a4
 ; LA64-NEXT:    or $a5, $a5, $a2
 ; LA64-NEXT:    sc.w $a5, $a0, 0
 ; LA64-NEXT:    beqz $a5, .LBB1_1
@@ -116,15 +110,12 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    andi $a2, $a2, 255
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a4, $a0, 0
-; LA64-NEXT:    and $a5, $a4, $a3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
 ; LA64-NEXT:    bne $a5, $a1, .LBB4_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB4_1 Depth=1
-; LA64-NEXT:    andn $a5, $a4, $a3
+; LA64-NEXT:    andn $a5, $a3, $a4
 ; LA64-NEXT:    or $a5, $a5, $a2
 ; LA64-NEXT:    sc.w $a5, $a0, 0
 ; LA64-NEXT:    beqz $a5, .LBB4_1
@@ -149,15 +140,12 @@ define void @cmpxchg_i16_acquire_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    bstrpick.d $a2, $a2, 15, 0
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a4, $a0, 0
-; LA64-NEXT:    and $a5, $a4, $a3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
 ; LA64-NEXT:    bne $a5, $a1, .LBB5_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB5_1 Depth=1
-; LA64-NEXT:    andn $a5, $a4, $a3
+; LA64-NEXT:    andn $a5, $a3, $a4
 ; LA64-NEXT:    or $a5, $a5, $a2
 ; LA64-NEXT:    sc.w $a5, $a0, 0
 ; LA64-NEXT:    beqz $a5, .LBB5_1
@@ -220,9 +208,6 @@ define i8 @cmpxchg_i8_acquire_acquire_reti8(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    andi $a2, $a2, 255
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a4
@@ -255,9 +240,6 @@ define i16 @cmpxchg_i16_acquire_acquire_reti16(ptr %ptr, i16 %cmp, i16 %val) nou
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    bstrpick.d $a2, $a2, 15, 0
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a4
@@ -332,24 +314,20 @@ define i1 @cmpxchg_i8_acquire_acquire_reti1(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    andi $a2, $a2, 255
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a5, $a0, 0
-; LA64-NEXT:    and $a6, $a5, $a3
-; LA64-NEXT:    bne $a6, $a1, .LBB12_3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
+; LA64-NEXT:    bne $a5, $a1, .LBB12_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB12_1 Depth=1
-; LA64-NEXT:    andn $a6, $a5, $a3
-; LA64-NEXT:    or $a6, $a6, $a2
-; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB12_1
+; LA64-NEXT:    andn $a5, $a3, $a4
+; LA64-NEXT:    or $a5, $a5, $a2
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB12_1
 ; LA64-NEXT:    b .LBB12_4
 ; LA64-NEXT:  .LBB12_3:
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB12_4:
-; LA64-NEXT:    and $a0, $a5, $a4
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    and $a0, $a3, $a4
 ; LA64-NEXT:    xor $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
@@ -370,24 +348,20 @@ define i1 @cmpxchg_i16_acquire_acquire_reti1(ptr %ptr, i16 %cmp, i16 %val) nounw
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    bstrpick.d $a2, $a2, 15, 0
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a5, $a0, 0
-; LA64-NEXT:    and $a6, $a5, $a3
-; LA64-NEXT:    bne $a6, $a1, .LBB13_3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
+; LA64-NEXT:    bne $a5, $a1, .LBB13_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB13_1 Depth=1
-; LA64-NEXT:    andn $a6, $a5, $a3
-; LA64-NEXT:    or $a6, $a6, $a2
-; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB13_1
+; LA64-NEXT:    andn $a5, $a3, $a4
+; LA64-NEXT:    or $a5, $a5, $a2
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB13_1
 ; LA64-NEXT:    b .LBB13_4
 ; LA64-NEXT:  .LBB13_3:
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB13_4:
-; LA64-NEXT:    and $a0, $a5, $a4
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    and $a0, $a3, $a4
 ; LA64-NEXT:    xor $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
@@ -452,15 +426,12 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    andi $a2, $a2, 255
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a4, $a0, 0
-; LA64-NEXT:    and $a5, $a4, $a3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
 ; LA64-NEXT:    bne $a5, $a1, .LBB16_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB16_1 Depth=1
-; LA64-NEXT:    andn $a5, $a4, $a3
+; LA64-NEXT:    andn $a5, $a3, $a4
 ; LA64-NEXT:    or $a5, $a5, $a2
 ; LA64-NEXT:    sc.w $a5, $a0, 0
 ; LA64-NEXT:    beqz $a5, .LBB16_1
@@ -485,15 +456,12 @@ define void @cmpxchg_i16_monotonic_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounw
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    bstrpick.d $a2, $a2, 15, 0
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a4, $a0, 0
-; LA64-NEXT:    and $a5, $a4, $a3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
 ; LA64-NEXT:    bne $a5, $a1, .LBB17_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB17_1 Depth=1
-; LA64-NEXT:    andn $a5, $a4, $a3
+; LA64-NEXT:    andn $a5, $a3, $a4
 ; LA64-NEXT:    or $a5, $a5, $a2
 ; LA64-NEXT:    sc.w $a5, $a0, 0
 ; LA64-NEXT:    beqz $a5, .LBB17_1
@@ -556,9 +524,6 @@ define i8 @cmpxchg_i8_monotonic_monotonic_reti8(ptr %ptr, i8 %cmp, i8 %val) noun
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    andi $a2, $a2, 255
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a4
@@ -591,9 +556,6 @@ define i16 @cmpxchg_i16_monotonic_monotonic_reti16(ptr %ptr, i16 %cmp, i16 %val)
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    bstrpick.d $a2, $a2, 15, 0
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
 ; LA64-NEXT:    and $a6, $a5, $a4
@@ -668,24 +630,20 @@ define i1 @cmpxchg_i8_monotonic_monotonic_reti1(ptr %ptr, i8 %cmp, i8 %val) noun
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    andi $a2, $a2, 255
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a5, $a0, 0
-; LA64-NEXT:    and $a6, $a5, $a3
-; LA64-NEXT:    bne $a6, $a1, .LBB24_3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
+; LA64-NEXT:    bne $a5, $a1, .LBB24_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB24_1 Depth=1
-; LA64-NEXT:    andn $a6, $a5, $a3
-; LA64-NEXT:    or $a6, $a6, $a2
-; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB24_1
+; LA64-NEXT:    andn $a5, $a3, $a4
+; LA64-NEXT:    or $a5, $a5, $a2
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB24_1
 ; LA64-NEXT:    b .LBB24_4
 ; LA64-NEXT:  .LBB24_3:
 ; LA64-NEXT:    dbar 1792
 ; LA64-NEXT:  .LBB24_4:
-; LA64-NEXT:    and $a0, $a5, $a4
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    and $a0, $a3, $a4
 ; LA64-NEXT:    xor $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
@@ -706,24 +664,20 @@ define i1 @cmpxchg_i16_monotonic_monotonic_reti1(ptr %ptr, i16 %cmp, i16 %val) n
 ; LA64-NEXT:    sll.w $a1, $a1, $a3
 ; LA64-NEXT:    bstrpick.d $a2, $a2, 15, 0
 ; LA64-NEXT:    sll.w $a2, $a2, $a3
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a2, $a2, 0
-; LA64-NEXT:    addi.w $a3, $a4, 0
 ; LA64-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
-; LA64-NEXT:    ll.w $a5, $a0, 0
-; LA64-NEXT:    and $a6, $a5, $a3
-; LA64-NEXT:    bne $a6, $a1, .LBB25_3
+; LA64-NEXT:    ll.w $a3, $a0, 0
+; LA64-NEXT:    and $a5, $a3, $a4
+; LA64-NEXT:    bne $a5, $a1, .LBB25_3
 ; LA64-NEXT:  # %bb.2: # in Loop: Header=BB25_1 Depth=1
-; LA64-NEXT:    andn $a6, $a5, $a3
-; LA64-NEXT:    or $a6, $a6, $a2
-; LA64-NEXT:    sc.w $a6, $a0, 0
-; LA64-NEXT:    beqz $a6, .LBB25_1
+; LA64-NEXT:    andn $a5, $a3, $a4
+; LA64-NEXT:    or $a5, $a5, $a2
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB25_1
 ; LA64-NEXT:    b .LBB25_4
 ; LA64-NEXT:  .LBB25_3:
 ; LA64-NEXT:    dbar 1792
 ; LA64-NEXT:  .LBB25_4:
-; LA64-NEXT:    and $a0, $a5, $a4
-; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    and $a0, $a3, $a4
 ; LA64-NEXT:    xor $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
index ba08790fb7cb04..81cc29419a0e0b 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-fp.ll
@@ -16,7 +16,6 @@ define float @float_fadd_acquire(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB0_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB0_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -51,7 +50,6 @@ define float @float_fadd_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB0_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB0_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -90,7 +88,6 @@ define float @float_fsub_acquire(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB1_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB1_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -125,7 +122,6 @@ define float @float_fsub_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB1_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB1_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -165,7 +161,6 @@ define float @float_fmin_acquire(ptr %p) nounwind {
 ; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB2_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB2_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -201,7 +196,6 @@ define float @float_fmin_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB2_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB2_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -241,7 +235,6 @@ define float @float_fmax_acquire(ptr %p) nounwind {
 ; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB3_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB3_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -277,7 +270,6 @@ define float @float_fmax_acquire(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB3_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB3_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -706,7 +698,6 @@ define float @float_fadd_release(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB8_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB8_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -741,7 +732,6 @@ define float @float_fadd_release(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB8_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB8_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -780,7 +770,6 @@ define float @float_fsub_release(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB9_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB9_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -815,7 +804,6 @@ define float @float_fsub_release(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB9_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB9_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -855,7 +843,6 @@ define float @float_fmin_release(ptr %p) nounwind {
 ; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB10_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB10_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -891,7 +878,6 @@ define float @float_fmin_release(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB10_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB10_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -931,7 +917,6 @@ define float @float_fmax_release(ptr %p) nounwind {
 ; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB11_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB11_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -967,7 +952,6 @@ define float @float_fmax_release(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB11_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB11_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -1396,7 +1380,6 @@ define float @float_fadd_acq_rel(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB16_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB16_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -1431,7 +1414,6 @@ define float @float_fadd_acq_rel(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB16_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB16_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -1470,7 +1452,6 @@ define float @float_fsub_acq_rel(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB17_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB17_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -1505,7 +1486,6 @@ define float @float_fsub_acq_rel(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB17_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB17_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -1545,7 +1525,6 @@ define float @float_fmin_acq_rel(ptr %p) nounwind {
 ; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB18_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB18_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -1581,7 +1560,6 @@ define float @float_fmin_acq_rel(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB18_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB18_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -1621,7 +1599,6 @@ define float @float_fmax_acq_rel(ptr %p) nounwind {
 ; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB19_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB19_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -1657,7 +1634,6 @@ define float @float_fmax_acq_rel(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB19_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB19_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2110,7 +2086,6 @@ define float @float_fadd_seq_cst(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB24_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB24_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2145,7 +2120,6 @@ define float @float_fadd_seq_cst(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB24_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB24_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2184,7 +2158,6 @@ define float @float_fsub_seq_cst(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB25_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB25_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2219,7 +2192,6 @@ define float @float_fsub_seq_cst(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB25_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB25_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2259,7 +2231,6 @@ define float @float_fmin_seq_cst(ptr %p) nounwind {
 ; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB26_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB26_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2295,7 +2266,6 @@ define float @float_fmin_seq_cst(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB26_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB26_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2335,7 +2305,6 @@ define float @float_fmax_seq_cst(ptr %p) nounwind {
 ; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB27_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB27_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2371,7 +2340,6 @@ define float @float_fmax_seq_cst(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB27_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB27_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2800,7 +2768,6 @@ define float @float_fadd_monotonic(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB32_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB32_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2835,7 +2802,6 @@ define float @float_fadd_monotonic(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB32_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB32_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2874,7 +2840,6 @@ define float @float_fsub_monotonic(ptr %p) nounwind {
 ; LA64F-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB33_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB33_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2909,7 +2874,6 @@ define float @float_fsub_monotonic(ptr %p) nounwind {
 ; LA64D-NEXT:    fadd.s $fa2, $fa0, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB33_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB33_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2949,7 +2913,6 @@ define float @float_fmin_monotonic(ptr %p) nounwind {
 ; LA64F-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB34_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB34_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -2985,7 +2948,6 @@ define float @float_fmin_monotonic(ptr %p) nounwind {
 ; LA64D-NEXT:    fmin.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB34_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB34_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
@@ -3025,7 +2987,6 @@ define float @float_fmax_monotonic(ptr %p) nounwind {
 ; LA64F-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64F-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64F-NEXT:    movfr2gr.s $a2, $fa0
-; LA64F-NEXT:    addi.w $a2, $a2, 0
 ; LA64F-NEXT:  .LBB35_3: # %atomicrmw.start
 ; LA64F-NEXT:    # Parent Loop BB35_1 Depth=1
 ; LA64F-NEXT:    # => This Inner Loop Header: Depth=2
@@ -3061,7 +3022,6 @@ define float @float_fmax_monotonic(ptr %p) nounwind {
 ; LA64D-NEXT:    fmax.s $fa2, $fa2, $fa1
 ; LA64D-NEXT:    movfr2gr.s $a1, $fa2
 ; LA64D-NEXT:    movfr2gr.s $a2, $fa0
-; LA64D-NEXT:    addi.w $a2, $a2, 0
 ; LA64D-NEXT:  .LBB35_3: # %atomicrmw.start
 ; LA64D-NEXT:    # Parent Loop BB35_1 Depth=1
 ; LA64D-NEXT:    # => This Inner Loop Header: Depth=2
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
index c36734e11f0189..794242f45fdb8c 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-minmax.ll
@@ -13,8 +13,6 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -44,8 +42,6 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB1_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -94,8 +90,6 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB4_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -125,8 +119,6 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB5_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -176,8 +168,6 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -211,8 +201,6 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
@@ -266,8 +254,6 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -301,8 +287,6 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
@@ -355,8 +339,6 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -386,8 +368,6 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -436,8 +416,6 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -467,8 +445,6 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -518,8 +494,6 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB24_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -553,8 +527,6 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB25_1: # =>This Inner Loop Header: Depth=1
@@ -608,8 +580,6 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB28_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -643,8 +613,6 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB29_1: # =>This Inner Loop Header: Depth=1
@@ -697,8 +665,6 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -728,8 +694,6 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB33_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -778,8 +742,6 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -809,8 +771,6 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -860,8 +820,6 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -895,8 +853,6 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
@@ -950,8 +906,6 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -985,8 +939,6 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
@@ -1039,8 +991,6 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -1070,8 +1020,6 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -1120,8 +1068,6 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB52_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -1151,8 +1097,6 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB53_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -1202,8 +1146,6 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -1237,8 +1179,6 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB57_1: # =>This Inner Loop Header: Depth=1
@@ -1292,8 +1232,6 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB60_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -1327,8 +1265,6 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB61_1: # =>This Inner Loop Header: Depth=1
@@ -1381,8 +1317,6 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -1412,8 +1346,6 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -1462,8 +1394,6 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -1493,8 +1423,6 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a6, $a4, $a3
@@ -1544,8 +1472,6 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -1579,8 +1505,6 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
@@ -1634,8 +1558,6 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.b $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    xori $a3, $a3, 56
 ; LA64-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a5, $a0, 0
@@ -1669,8 +1591,6 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a4, $a4, $a2
 ; LA64-NEXT:    ext.w.h $a1, $a1
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a4, $a4, 0
 ; LA64-NEXT:    ori $a5, $zero, 48
 ; LA64-NEXT:    sub.d $a3, $a5, $a3
 ; LA64-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
index 4669065114f0cc..9b83b4c9535ee9 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
@@ -31,8 +31,6 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -139,8 +137,6 @@ define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -290,8 +286,6 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -338,8 +332,6 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -425,8 +417,6 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -473,8 +463,6 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -563,8 +551,6 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
@@ -613,8 +599,6 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
@@ -1025,8 +1009,6 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB32_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -1133,8 +1115,6 @@ define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -1284,8 +1264,6 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -1332,8 +1310,6 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -1419,8 +1395,6 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -1467,8 +1441,6 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -1557,8 +1529,6 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
@@ -1607,8 +1577,6 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
@@ -2019,8 +1987,6 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -2127,8 +2093,6 @@ define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -2278,8 +2242,6 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -2326,8 +2288,6 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -2413,8 +2373,6 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -2461,8 +2419,6 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -2551,8 +2507,6 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB80_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
@@ -2601,8 +2555,6 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB81_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
@@ -3013,8 +2965,6 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -3121,8 +3071,6 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -3272,8 +3220,6 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -3320,8 +3266,6 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -3407,8 +3351,6 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB108_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -3455,8 +3397,6 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB109_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -3545,8 +3485,6 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB112_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
@@ -3595,8 +3533,6 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB113_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
@@ -4007,8 +3943,6 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB128_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -4115,8 +4049,6 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB131_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    addi.w $a5, $a1, 0
@@ -4266,8 +4198,6 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB136_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -4314,8 +4244,6 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB137_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    add.w $a5, $a4, $a1
@@ -4401,8 +4329,6 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB140_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -4449,8 +4375,6 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB141_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    sub.w $a5, $a4, $a1
@@ -4539,8 +4463,6 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    andi $a1, $a1, 255
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB144_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
@@ -4589,8 +4511,6 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-NEXT:    sll.w $a3, $a3, $a2
 ; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
 ; LA64-NEXT:    sll.w $a1, $a1, $a2
-; LA64-NEXT:    addi.w $a1, $a1, 0
-; LA64-NEXT:    addi.w $a3, $a3, 0
 ; LA64-NEXT:  .LBB145_1: # =>This Inner Loop Header: Depth=1
 ; LA64-NEXT:    ll.w $a4, $a0, 0
 ; LA64-NEXT:    and $a5, $a4, $a1
diff --git a/llvm/test/CodeGen/LoongArch/preferred-alignments.ll b/llvm/test/CodeGen/LoongArch/preferred-alignments.ll
index 2b6a109228a612..30305127b94fa8 100644
--- a/llvm/test/CodeGen/LoongArch/preferred-alignments.ll
+++ b/llvm/test/CodeGen/LoongArch/preferred-alignments.ll
@@ -13,16 +13,16 @@ define signext i32 @sum(ptr noalias nocapture noundef readonly %0, i32 noundef s
 ; LA464-NEXT:    .p2align 4, , 16
 ; LA464-NEXT:  .LBB0_2: # =>This Inner Loop Header: Depth=1
 ; LA464-NEXT:    ld.w $a3, $a0, 0
-; LA464-NEXT:    add.d $a2, $a3, $a2
+; LA464-NEXT:    add.w $a2, $a3, $a2
 ; LA464-NEXT:    addi.d $a1, $a1, -1
 ; LA464-NEXT:    addi.d $a0, $a0, 4
 ; LA464-NEXT:    bnez $a1, .LBB0_2
 ; LA464-NEXT:  # %bb.3:
-; LA464-NEXT:    addi.w $a0, $a2, 0
+; LA464-NEXT:    move $a0, $a2
 ; LA464-NEXT:    ret
 ; LA464-NEXT:  .LBB0_4:
 ; LA464-NEXT:    move $a2, $zero
-; LA464-NEXT:    addi.w $a0, $a2, 0
+; LA464-NEXT:    move $a0, $a2
 ; LA464-NEXT:    ret
   %3 = icmp sgt i32 %1, 0
   br i1 %3, label %4, label %6
diff --git a/llvm/test/CodeGen/LoongArch/sextw-removal.ll b/llvm/test/CodeGen/LoongArch/sextw-removal.ll
index 6db9c1608b3c77..7708873e264d9c 100644
--- a/llvm/test/CodeGen/LoongArch/sextw-removal.ll
+++ b/llvm/test/CodeGen/LoongArch/sextw-removal.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s --mtriple=loongarch64 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s --mtriple=loongarch64 --loongarch-disable-sextw-removal | \
+; RUN:   FileCheck %s --check-prefix=NORMV
 
 define void @test1(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-LABEL: test1:
@@ -13,7 +15,7 @@ define void @test1(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB0_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a0, $s0, 0
+; CHECK-NEXT:    move $a0, $s0
 ; CHECK-NEXT:    bl %plt(bar)
 ; CHECK-NEXT:    sll.w $s0, $s0, $fp
 ; CHECK-NEXT:    bnez $a0, .LBB0_1
@@ -23,6 +25,28 @@ define void @test1(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 32
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test1:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -32
+; NORMV-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    move $fp, $a1
+; NORMV-NEXT:    sra.w $s0, $a0, $a1
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB0_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a0, $s0, 0
+; NORMV-NEXT:    bl %plt(bar)
+; NORMV-NEXT:    sll.w $s0, $s0, $fp
+; NORMV-NEXT:    bnez $a0, .LBB0_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 32
+; NORMV-NEXT:    ret
 bb:
   %i = ashr i32 %arg, %arg1
   br label %bb2
@@ -47,8 +71,16 @@ define signext i32 @test2(ptr %p, i32 signext %b) nounwind {
 ; CHECK-NEXT:    ori $a2, $zero, 1
 ; CHECK-NEXT:    sll.w $a1, $a2, $a1
 ; CHECK-NEXT:    andn $a0, $a0, $a1
-; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test2:
+; NORMV:       # %bb.0:
+; NORMV-NEXT:    ld.w $a0, $a0, 0
+; NORMV-NEXT:    ori $a2, $zero, 1
+; NORMV-NEXT:    sll.w $a1, $a2, $a1
+; NORMV-NEXT:    andn $a0, $a0, $a1
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
   %a = load i32, ptr %p
   %shl = shl i32 1, %b
   %neg = xor i32 %shl, -1
@@ -63,8 +95,16 @@ define signext i32 @test3(ptr %p, i32 signext %b) nounwind {
 ; CHECK-NEXT:    ori $a2, $zero, 1
 ; CHECK-NEXT:    sll.w $a1, $a2, $a1
 ; CHECK-NEXT:    orn $a0, $a0, $a1
-; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test3:
+; NORMV:       # %bb.0:
+; NORMV-NEXT:    ld.w $a0, $a0, 0
+; NORMV-NEXT:    ori $a2, $zero, 1
+; NORMV-NEXT:    sll.w $a1, $a2, $a1
+; NORMV-NEXT:    orn $a0, $a0, $a1
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
   %a = load i32, ptr %p
   %shl = shl i32 1, %b
   %neg = xor i32 %shl, -1
@@ -82,6 +122,16 @@ define signext i32 @test4(ptr %p, i32 signext %b) nounwind {
 ; CHECK-NEXT:    nor $a0, $a0, $zero
 ; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test4:
+; NORMV:       # %bb.0:
+; NORMV-NEXT:    ld.w $a0, $a0, 0
+; NORMV-NEXT:    ori $a2, $zero, 1
+; NORMV-NEXT:    sll.w $a1, $a2, $a1
+; NORMV-NEXT:    xor $a0, $a1, $a0
+; NORMV-NEXT:    nor $a0, $a0, $zero
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
   %a = load i32, ptr %p
   %shl = shl i32 1, %b
   %neg = xor i32 %shl, -1
@@ -133,6 +183,50 @@ define void @test5(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 48
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test5:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -48
+; NORMV-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s2, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    sra.w $a1, $a0, $a1
+; NORMV-NEXT:    lu12i.w $a0, 349525
+; NORMV-NEXT:    ori $fp, $a0, 1365
+; NORMV-NEXT:    lu12i.w $a0, 209715
+; NORMV-NEXT:    ori $s0, $a0, 819
+; NORMV-NEXT:    lu12i.w $a0, 61680
+; NORMV-NEXT:    ori $s1, $a0, 3855
+; NORMV-NEXT:    lu12i.w $a0, 4112
+; NORMV-NEXT:    ori $s2, $a0, 257
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB4_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a0, $a1, 0
+; NORMV-NEXT:    bl %plt(bar)
+; NORMV-NEXT:    srli.d $a1, $a0, 1
+; NORMV-NEXT:    and $a1, $a1, $fp
+; NORMV-NEXT:    sub.d $a1, $a0, $a1
+; NORMV-NEXT:    and $a2, $a1, $s0
+; NORMV-NEXT:    srli.d $a1, $a1, 2
+; NORMV-NEXT:    and $a1, $a1, $s0
+; NORMV-NEXT:    add.d $a1, $a2, $a1
+; NORMV-NEXT:    srli.d $a2, $a1, 4
+; NORMV-NEXT:    add.d $a1, $a1, $a2
+; NORMV-NEXT:    and $a1, $a1, $s1
+; NORMV-NEXT:    mul.d $a1, $a1, $s2
+; NORMV-NEXT:    bstrpick.d $a1, $a1, 31, 24
+; NORMV-NEXT:    bnez $a0, .LBB4_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    ld.d $s2, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 48
+; NORMV-NEXT:    ret
 bb:
   %i = ashr i32 %arg, %arg1
   br label %bb2
@@ -177,6 +271,33 @@ define void @test6(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 32
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test6:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -32
+; NORMV-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    sra.w $fp, $a0, $a1
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB5_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a0, $fp, 0
+; NORMV-NEXT:    bl %plt(baz)
+; NORMV-NEXT:    bstrpick.d $s0, $a0, 31, 0
+; NORMV-NEXT:    move $a0, $s0
+; NORMV-NEXT:    bl %plt(__fixsfsi)
+; NORMV-NEXT:    move $fp, $a0
+; NORMV-NEXT:    move $a0, $s0
+; NORMV-NEXT:    move $a1, $zero
+; NORMV-NEXT:    bl %plt(__nesf2)
+; NORMV-NEXT:    bnez $a0, .LBB5_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 32
+; NORMV-NEXT:    ret
 bb:
   %i = ashr i32 %arg, %arg1
   br label %bb2
@@ -222,7 +343,6 @@ define void @test7(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB6_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    bl %plt(foo)
 ; CHECK-NEXT:    srli.d $a1, $a0, 1
 ; CHECK-NEXT:    and $a1, $a1, $fp
@@ -245,6 +365,58 @@ define void @test7(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 48
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test7:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -48
+; NORMV-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s2, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    sra.w $a0, $a0, $a1
+; NORMV-NEXT:    lu12i.w $a1, 349525
+; NORMV-NEXT:    ori $a1, $a1, 1365
+; NORMV-NEXT:    lu32i.d $a1, 349525
+; NORMV-NEXT:    lu52i.d $fp, $a1, 1365
+; NORMV-NEXT:    lu12i.w $a1, 209715
+; NORMV-NEXT:    ori $a1, $a1, 819
+; NORMV-NEXT:    lu32i.d $a1, 209715
+; NORMV-NEXT:    lu52i.d $s0, $a1, 819
+; NORMV-NEXT:    lu12i.w $a1, 61680
+; NORMV-NEXT:    ori $a1, $a1, 3855
+; NORMV-NEXT:    lu32i.d $a1, -61681
+; NORMV-NEXT:    lu52i.d $s1, $a1, 240
+; NORMV-NEXT:    lu12i.w $a1, 4112
+; NORMV-NEXT:    ori $a1, $a1, 257
+; NORMV-NEXT:    lu32i.d $a1, 65793
+; NORMV-NEXT:    lu52i.d $s2, $a1, 16
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB6_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    bl %plt(foo)
+; NORMV-NEXT:    srli.d $a1, $a0, 1
+; NORMV-NEXT:    and $a1, $a1, $fp
+; NORMV-NEXT:    sub.d $a0, $a0, $a1
+; NORMV-NEXT:    and $a1, $a0, $s0
+; NORMV-NEXT:    srli.d $a0, $a0, 2
+; NORMV-NEXT:    and $a0, $a0, $s0
+; NORMV-NEXT:    add.d $a0, $a1, $a0
+; NORMV-NEXT:    srli.d $a1, $a0, 4
+; NORMV-NEXT:    add.d $a0, $a0, $a1
+; NORMV-NEXT:    and $a0, $a0, $s1
+; NORMV-NEXT:    mul.d $a0, $a0, $s2
+; NORMV-NEXT:    srli.d $a0, $a0, 56
+; NORMV-NEXT:    bnez $a0, .LBB6_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    ld.d $s2, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 48
+; NORMV-NEXT:    ret
 bb:
   %i = ashr i32 %arg, %arg1
   br label %bb2
@@ -283,6 +455,26 @@ define void @test8(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test8:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -16
+; NORMV-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; NORMV-NEXT:    sra.w $a0, $a0, $a1
+; NORMV-NEXT:    addi.w $fp, $zero, -256
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB7_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    bl %plt(foo)
+; NORMV-NEXT:    or $a0, $a0, $fp
+; NORMV-NEXT:    bnez $a0, .LBB7_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 16
+; NORMV-NEXT:    ret
 bb:
   %i = ashr i32 %arg, %arg1
   br label %bb2
@@ -307,20 +499,40 @@ define void @test9(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    addi.d $sp, $sp, -16
 ; CHECK-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
 ; CHECK-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
-; CHECK-NEXT:    sra.w $a1, $a0, $a1
+; CHECK-NEXT:    sra.w $a0, $a0, $a1
 ; CHECK-NEXT:    ori $fp, $zero, 254
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB8_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a0, $a1, 0
 ; CHECK-NEXT:    bl %plt(bar)
-; CHECK-NEXT:    slti $a1, $a0, 255
-; CHECK-NEXT:    blt $fp, $a0, .LBB8_1
+; CHECK-NEXT:    move $a1, $a0
+; CHECK-NEXT:    slti $a0, $a0, 255
+; CHECK-NEXT:    blt $fp, $a1, .LBB8_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
 ; CHECK-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test9:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -16
+; NORMV-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; NORMV-NEXT:    sra.w $a1, $a0, $a1
+; NORMV-NEXT:    ori $fp, $zero, 254
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB8_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a0, $a1, 0
+; NORMV-NEXT:    bl %plt(bar)
+; NORMV-NEXT:    slti $a1, $a0, 255
+; NORMV-NEXT:    blt $fp, $a0, .LBB8_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 16
+; NORMV-NEXT:    ret
 bb:
   %i = ashr i32 %arg, %arg1
   br label %bb2
@@ -359,6 +571,28 @@ define void @test10(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test10:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -16
+; NORMV-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; NORMV-NEXT:    sra.w $fp, $a0, $a1
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB9_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a0, $fp, 0
+; NORMV-NEXT:    bl %plt(baz)
+; NORMV-NEXT:    move $fp, $a0
+; NORMV-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; NORMV-NEXT:    move $a1, $zero
+; NORMV-NEXT:    bl %plt(__nesf2)
+; NORMV-NEXT:    bnez $a0, .LBB9_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 16
+; NORMV-NEXT:    ret
 bb:
   %i = ashr i32 %arg, %arg1
   br label %bb2
@@ -384,11 +618,25 @@ define signext i32 @test11(i64 %arg1, i64 %arg2, i64 %arg3)  {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    andi $a0, $a0, 1234
 ; CHECK-NEXT:    addi.d $a2, $a2, 1
-; CHECK-NEXT:    add.d $a0, $a0, $a1
+; CHECK-NEXT:    add.w $a0, $a0, $a1
 ; CHECK-NEXT:    bltu $a2, $a3, .LBB10_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
-; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test11:
+; NORMV:       # %bb.0: # %entry
+; NORMV-NEXT:    addi.d $a2, $a2, -1
+; NORMV-NEXT:    ori $a3, $zero, 256
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB10_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    andi $a0, $a0, 1234
+; NORMV-NEXT:    addi.d $a2, $a2, 1
+; NORMV-NEXT:    add.d $a0, $a0, $a1
+; NORMV-NEXT:    bltu $a2, $a3, .LBB10_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
 entry:
   br label %bb2
 
@@ -409,21 +657,39 @@ bb7:                                              ; preds = %bb2
 define signext i32 @test12(i64 %arg1, i64 %arg2, i64 %arg3)  {
 ; CHECK-LABEL: test12:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi.d $a2, $a2, -1
-; CHECK-NEXT:    ori $a3, $zero, 256
+; CHECK-NEXT:    addi.d $a3, $a2, -1
+; CHECK-NEXT:    ori $a4, $zero, 256
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB11_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    xor $a0, $a0, $a1
-; CHECK-NEXT:    mul.d $a4, $a0, $a1
-; CHECK-NEXT:    add.d $a0, $a0, $a4
-; CHECK-NEXT:    and $a4, $a4, $a0
-; CHECK-NEXT:    addi.d $a2, $a2, 1
-; CHECK-NEXT:    add.d $a0, $a4, $a1
-; CHECK-NEXT:    bltu $a2, $a3, .LBB11_1
+; CHECK-NEXT:    mul.w $a2, $a0, $a1
+; CHECK-NEXT:    add.w $a0, $a0, $a2
+; CHECK-NEXT:    and $a2, $a2, $a0
+; CHECK-NEXT:    addi.d $a3, $a3, 1
+; CHECK-NEXT:    add.d $a0, $a2, $a1
+; CHECK-NEXT:    bltu $a3, $a4, .LBB11_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
-; CHECK-NEXT:    addi.w $a0, $a4, 0
+; CHECK-NEXT:    move $a0, $a2
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test12:
+; NORMV:       # %bb.0: # %entry
+; NORMV-NEXT:    addi.d $a2, $a2, -1
+; NORMV-NEXT:    ori $a3, $zero, 256
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB11_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    xor $a0, $a0, $a1
+; NORMV-NEXT:    mul.d $a4, $a0, $a1
+; NORMV-NEXT:    add.d $a0, $a0, $a4
+; NORMV-NEXT:    and $a4, $a4, $a0
+; NORMV-NEXT:    addi.d $a2, $a2, 1
+; NORMV-NEXT:    add.d $a0, $a4, $a1
+; NORMV-NEXT:    bltu $a2, $a3, .LBB11_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    addi.w $a0, $a4, 0
+; NORMV-NEXT:    ret
 entry:
   br label %bb2
 
@@ -459,6 +725,21 @@ define signext i32 @test13(i64 %arg1, i64 %arg2, i64 %arg3)  {
 ; CHECK-NEXT:  # %bb.2: # %bb7
 ; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test13:
+; NORMV:       # %bb.0: # %entry
+; NORMV-NEXT:    addi.d $a2, $a2, -1
+; NORMV-NEXT:    ori $a3, $zero, 256
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB12_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    div.d $a0, $a0, $a1
+; NORMV-NEXT:    addi.d $a2, $a2, 1
+; NORMV-NEXT:    add.d $a0, $a0, $a1
+; NORMV-NEXT:    bltu $a2, $a3, .LBB12_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
 entry:
   br label %bb2
 
@@ -489,10 +770,9 @@ define signext i32 @test14(i32 signext %0, i32 signext %1) {
 ; CHECK-NEXT:    ori $a4, $zero, 1000
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB13_2: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a5, $a0, 0
-; CHECK-NEXT:    blt $a4, $a5, .LBB13_5
+; CHECK-NEXT:    blt $a4, $a0, .LBB13_5
 ; CHECK-NEXT:  # %bb.3: # in Loop: Header=BB13_2 Depth=1
-; CHECK-NEXT:    add.d $a0, $a3, $a0
+; CHECK-NEXT:    add.w $a0, $a3, $a0
 ; CHECK-NEXT:    addi.w $a3, $a3, 1
 ; CHECK-NEXT:    blt $a3, $a1, .LBB13_2
 ; CHECK-NEXT:  .LBB13_4:
@@ -501,6 +781,30 @@ define signext i32 @test14(i32 signext %0, i32 signext %1) {
 ; CHECK-NEXT:  .LBB13_5:
 ; CHECK-NEXT:    addi.w $a0, $a2, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test14:
+; NORMV:       # %bb.0:
+; NORMV-NEXT:    ori $a2, $zero, 2
+; NORMV-NEXT:    blt $a1, $a2, .LBB13_4
+; NORMV-NEXT:  # %bb.1: # %.preheader
+; NORMV-NEXT:    ori $a3, $zero, 1
+; NORMV-NEXT:    addi.w $a2, $zero, -1
+; NORMV-NEXT:    lu32i.d $a2, 0
+; NORMV-NEXT:    ori $a4, $zero, 1000
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB13_2: # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a5, $a0, 0
+; NORMV-NEXT:    blt $a4, $a5, .LBB13_5
+; NORMV-NEXT:  # %bb.3: # in Loop: Header=BB13_2 Depth=1
+; NORMV-NEXT:    add.d $a0, $a3, $a0
+; NORMV-NEXT:    addi.w $a3, $a3, 1
+; NORMV-NEXT:    blt $a3, $a1, .LBB13_2
+; NORMV-NEXT:  .LBB13_4:
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
+; NORMV-NEXT:  .LBB13_5:
+; NORMV-NEXT:    addi.w $a0, $a2, 0
+; NORMV-NEXT:    ret
   %3 = icmp sgt i32 %1, 1
   br i1 %3, label %4, label %12
 
@@ -545,6 +849,30 @@ define signext i32 @test14b(i32 %0, i32 signext %1) {
 ; CHECK-NEXT:  .LBB14_5:
 ; CHECK-NEXT:    addi.w $a0, $a2, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test14b:
+; NORMV:       # %bb.0:
+; NORMV-NEXT:    ori $a2, $zero, 2
+; NORMV-NEXT:    blt $a1, $a2, .LBB14_4
+; NORMV-NEXT:  # %bb.1: # %.preheader
+; NORMV-NEXT:    ori $a3, $zero, 1
+; NORMV-NEXT:    addi.w $a2, $zero, -1
+; NORMV-NEXT:    lu32i.d $a2, 0
+; NORMV-NEXT:    ori $a4, $zero, 1000
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB14_2: # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a5, $a0, 0
+; NORMV-NEXT:    blt $a4, $a5, .LBB14_5
+; NORMV-NEXT:  # %bb.3: # in Loop: Header=BB14_2 Depth=1
+; NORMV-NEXT:    add.d $a0, $a3, $a0
+; NORMV-NEXT:    addi.w $a3, $a3, 1
+; NORMV-NEXT:    blt $a3, $a1, .LBB14_2
+; NORMV-NEXT:  .LBB14_4:
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
+; NORMV-NEXT:  .LBB14_5:
+; NORMV-NEXT:    addi.w $a0, $a2, 0
+; NORMV-NEXT:    ret
   %3 = icmp sgt i32 %1, 1
   br i1 %3, label %4, label %12
 
@@ -589,6 +917,30 @@ define signext i32 @test14c(i32 zeroext %0, i32 signext %1) {
 ; CHECK-NEXT:  .LBB15_5:
 ; CHECK-NEXT:    addi.w $a0, $a2, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test14c:
+; NORMV:       # %bb.0:
+; NORMV-NEXT:    ori $a2, $zero, 2
+; NORMV-NEXT:    blt $a1, $a2, .LBB15_4
+; NORMV-NEXT:  # %bb.1: # %.preheader
+; NORMV-NEXT:    ori $a3, $zero, 1
+; NORMV-NEXT:    addi.w $a2, $zero, -1
+; NORMV-NEXT:    lu32i.d $a2, 0
+; NORMV-NEXT:    ori $a4, $zero, 1000
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB15_2: # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a5, $a0, 0
+; NORMV-NEXT:    blt $a4, $a5, .LBB15_5
+; NORMV-NEXT:  # %bb.3: # in Loop: Header=BB15_2 Depth=1
+; NORMV-NEXT:    add.d $a0, $a3, $a0
+; NORMV-NEXT:    addi.w $a3, $a3, 1
+; NORMV-NEXT:    blt $a3, $a1, .LBB15_2
+; NORMV-NEXT:  .LBB15_4:
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
+; NORMV-NEXT:  .LBB15_5:
+; NORMV-NEXT:    addi.w $a0, $a2, 0
+; NORMV-NEXT:    ret
   %3 = icmp sgt i32 %1, 1
   br i1 %3, label %4, label %12
 
@@ -621,10 +973,9 @@ define signext i32 @test14d(i31 zeroext %0, i32 signext %1) {
 ; CHECK-NEXT:    ori $a4, $zero, 1000
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB16_2: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a5, $a0, 0
-; CHECK-NEXT:    blt $a4, $a5, .LBB16_5
+; CHECK-NEXT:    blt $a4, $a0, .LBB16_5
 ; CHECK-NEXT:  # %bb.3: # in Loop: Header=BB16_2 Depth=1
-; CHECK-NEXT:    add.d $a0, $a3, $a0
+; CHECK-NEXT:    add.w $a0, $a3, $a0
 ; CHECK-NEXT:    addi.w $a3, $a3, 1
 ; CHECK-NEXT:    blt $a3, $a1, .LBB16_2
 ; CHECK-NEXT:  .LBB16_4:
@@ -633,6 +984,30 @@ define signext i32 @test14d(i31 zeroext %0, i32 signext %1) {
 ; CHECK-NEXT:  .LBB16_5:
 ; CHECK-NEXT:    addi.w $a0, $a2, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test14d:
+; NORMV:       # %bb.0:
+; NORMV-NEXT:    ori $a2, $zero, 2
+; NORMV-NEXT:    blt $a1, $a2, .LBB16_4
+; NORMV-NEXT:  # %bb.1: # %.preheader
+; NORMV-NEXT:    ori $a3, $zero, 1
+; NORMV-NEXT:    addi.w $a2, $zero, -1
+; NORMV-NEXT:    lu32i.d $a2, 0
+; NORMV-NEXT:    ori $a4, $zero, 1000
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB16_2: # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a5, $a0, 0
+; NORMV-NEXT:    blt $a4, $a5, .LBB16_5
+; NORMV-NEXT:  # %bb.3: # in Loop: Header=BB16_2 Depth=1
+; NORMV-NEXT:    add.d $a0, $a3, $a0
+; NORMV-NEXT:    addi.w $a3, $a3, 1
+; NORMV-NEXT:    blt $a3, $a1, .LBB16_2
+; NORMV-NEXT:  .LBB16_4:
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
+; NORMV-NEXT:  .LBB16_5:
+; NORMV-NEXT:    addi.w $a0, $a2, 0
+; NORMV-NEXT:    ret
   %zext = zext i31 %0 to i32
   %3 = icmp sgt i32 %1, 1
   br i1 %3, label %4, label %12
@@ -663,13 +1038,28 @@ define signext i32 @test15(i64 %arg1, i64 %arg2, i64 %arg3, ptr %arg4)  {
 ; CHECK-NEXT:  .LBB17_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    andi $a0, $a0, 1234
-; CHECK-NEXT:    add.d $a0, $a0, $a1
+; CHECK-NEXT:    add.w $a0, $a0, $a1
 ; CHECK-NEXT:    addi.d $a2, $a2, 1
 ; CHECK-NEXT:    st.w $a0, $a3, 0
 ; CHECK-NEXT:    bltu $a2, $a4, .LBB17_1
 ; CHECK-NEXT:  # %bb.2: # %bb7
-; CHECK-NEXT:    addi.w $a0, $a0, 0
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test15:
+; NORMV:       # %bb.0: # %entry
+; NORMV-NEXT:    addi.d $a2, $a2, -1
+; NORMV-NEXT:    ori $a4, $zero, 256
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB17_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    andi $a0, $a0, 1234
+; NORMV-NEXT:    add.d $a0, $a0, $a1
+; NORMV-NEXT:    addi.d $a2, $a2, 1
+; NORMV-NEXT:    st.w $a0, $a3, 0
+; NORMV-NEXT:    bltu $a2, $a4, .LBB17_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
 entry:
   br label %bb2
 
@@ -738,12 +1128,66 @@ define signext i32 @bug(i32 signext %x) {
 ; CHECK-NEXT:    or $a1, $a2, $a1
 ; CHECK-NEXT:    srai.d $a0, $a0, 31
 ; CHECK-NEXT:    nor $a0, $a0, $zero
-; CHECK-NEXT:    add.d $a0, $a1, $a0
-; CHECK-NEXT:    addi.w $a0, $a0, 0
+; CHECK-NEXT:    add.w $a0, $a1, $a0
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB18_2:
-; CHECK-NEXT:    addi.w $a0, $zero, 0
+; CHECK-NEXT:    move $a0, $zero
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: bug:
+; NORMV:       # %bb.0: # %entry
+; NORMV-NEXT:    beqz $a0, .LBB18_2
+; NORMV-NEXT:  # %bb.1: # %if.end
+; NORMV-NEXT:    bstrpick.d $a1, $a0, 31, 16
+; NORMV-NEXT:    sltui $a1, $a1, 1
+; NORMV-NEXT:    slli.d $a2, $a0, 16
+; NORMV-NEXT:    masknez $a0, $a0, $a1
+; NORMV-NEXT:    maskeqz $a2, $a2, $a1
+; NORMV-NEXT:    or $a0, $a2, $a0
+; NORMV-NEXT:    ori $a2, $zero, 32
+; NORMV-NEXT:    masknez $a2, $a2, $a1
+; NORMV-NEXT:    ori $a3, $zero, 16
+; NORMV-NEXT:    maskeqz $a1, $a3, $a1
+; NORMV-NEXT:    or $a1, $a1, $a2
+; NORMV-NEXT:    bstrpick.d $a2, $a0, 31, 24
+; NORMV-NEXT:    sltui $a2, $a2, 1
+; NORMV-NEXT:    slli.d $a3, $a0, 8
+; NORMV-NEXT:    addi.d $a4, $a1, -8
+; NORMV-NEXT:    masknez $a0, $a0, $a2
+; NORMV-NEXT:    maskeqz $a3, $a3, $a2
+; NORMV-NEXT:    or $a0, $a3, $a0
+; NORMV-NEXT:    masknez $a1, $a1, $a2
+; NORMV-NEXT:    maskeqz $a2, $a4, $a2
+; NORMV-NEXT:    or $a1, $a2, $a1
+; NORMV-NEXT:    bstrpick.d $a2, $a0, 31, 28
+; NORMV-NEXT:    sltui $a2, $a2, 1
+; NORMV-NEXT:    slli.d $a3, $a0, 4
+; NORMV-NEXT:    addi.d $a4, $a1, -4
+; NORMV-NEXT:    masknez $a0, $a0, $a2
+; NORMV-NEXT:    maskeqz $a3, $a3, $a2
+; NORMV-NEXT:    or $a0, $a3, $a0
+; NORMV-NEXT:    masknez $a1, $a1, $a2
+; NORMV-NEXT:    maskeqz $a2, $a4, $a2
+; NORMV-NEXT:    or $a1, $a2, $a1
+; NORMV-NEXT:    bstrpick.d $a2, $a0, 31, 30
+; NORMV-NEXT:    sltui $a2, $a2, 1
+; NORMV-NEXT:    slli.d $a3, $a0, 2
+; NORMV-NEXT:    addi.d $a4, $a1, -2
+; NORMV-NEXT:    masknez $a0, $a0, $a2
+; NORMV-NEXT:    maskeqz $a3, $a3, $a2
+; NORMV-NEXT:    or $a0, $a3, $a0
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    masknez $a1, $a1, $a2
+; NORMV-NEXT:    maskeqz $a2, $a4, $a2
+; NORMV-NEXT:    or $a1, $a2, $a1
+; NORMV-NEXT:    srai.d $a0, $a0, 31
+; NORMV-NEXT:    nor $a0, $a0, $zero
+; NORMV-NEXT:    add.d $a0, $a1, $a0
+; NORMV-NEXT:    addi.w $a0, $a0, 0
+; NORMV-NEXT:    ret
+; NORMV-NEXT:  .LBB18_2:
+; NORMV-NEXT:    addi.w $a0, $zero, 0
+; NORMV-NEXT:    ret
 entry:
   %tobool.not = icmp eq i32 %x, 0
   br i1 %tobool.not, label %cleanup, label %if.end
@@ -791,7 +1235,7 @@ define void @test16(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB19_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a0, $s0, 0
+; CHECK-NEXT:    move $a0, $s0
 ; CHECK-NEXT:    bl %plt(bar)
 ; CHECK-NEXT:    sll.w $s0, $s0, $fp
 ; CHECK-NEXT:    bnez $a0, .LBB19_1
@@ -801,6 +1245,29 @@ define void @test16(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 32
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test16:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -32
+; NORMV-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    move $fp, $a1
+; NORMV-NEXT:    bl %plt(bar)
+; NORMV-NEXT:    move $s0, $a0
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB19_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a0, $s0, 0
+; NORMV-NEXT:    bl %plt(bar)
+; NORMV-NEXT:    sll.w $s0, $s0, $fp
+; NORMV-NEXT:    bnez $a0, .LBB19_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 32
+; NORMV-NEXT:    ret
 bb:
   %i = call signext i32 @bar(i32 signext %arg)
   br label %bb2
@@ -829,7 +1296,7 @@ define void @test17(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    .p2align 4, , 16
 ; CHECK-NEXT:  .LBB20_1: # %bb2
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    addi.w $a0, $s0, 0
+; CHECK-NEXT:    move $a0, $s0
 ; CHECK-NEXT:    bl %plt(bar)
 ; CHECK-NEXT:    sll.w $s0, $s0, $fp
 ; CHECK-NEXT:    bnez $a0, .LBB20_1
@@ -839,6 +1306,29 @@ define void @test17(i32 signext %arg, i32 signext %arg1) nounwind {
 ; CHECK-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 32
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test17:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -32
+; NORMV-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    move $fp, $a1
+; NORMV-NEXT:    bl %plt(bat)
+; NORMV-NEXT:    move $s0, $a0
+; NORMV-NEXT:    .p2align 4, , 16
+; NORMV-NEXT:  .LBB20_1: # %bb2
+; NORMV-NEXT:    # =>This Inner Loop Header: Depth=1
+; NORMV-NEXT:    addi.w $a0, $s0, 0
+; NORMV-NEXT:    bl %plt(bar)
+; NORMV-NEXT:    sll.w $s0, $s0, $fp
+; NORMV-NEXT:    bnez $a0, .LBB20_1
+; NORMV-NEXT:  # %bb.2: # %bb7
+; NORMV-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 32
+; NORMV-NEXT:    ret
 bb:
   %i = call zeroext i16 @bat(i32 signext %arg)
   %zext = zext i16 %i to i32
@@ -866,6 +1356,16 @@ define signext i32 @sextw_sh2add(i1 zeroext %0, ptr %1, i32 signext %2, i32 sign
 ; CHECK-NEXT:  .LBB21_2:
 ; CHECK-NEXT:    add.w $a0, $a2, $a4
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: sextw_sh2add:
+; NORMV:       # %bb.0:
+; NORMV-NEXT:    alsl.d $a2, $a2, $a3, 2
+; NORMV-NEXT:    beqz $a0, .LBB21_2
+; NORMV-NEXT:  # %bb.1:
+; NORMV-NEXT:    st.w $a2, $a1, 0
+; NORMV-NEXT:  .LBB21_2:
+; NORMV-NEXT:    add.w $a0, $a2, $a4
+; NORMV-NEXT:    ret
   %6 = shl i32 %2, 2
   %7 = add i32 %6, %3
   br i1 %0, label %8, label %9
@@ -901,6 +1401,28 @@ define signext i32 @test19(i64 %arg, i1 zeroext %c1, i1 zeroext %c2, ptr %p) nou
 ; CHECK-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
 ; CHECK-NEXT:    addi.d $sp, $sp, 16
 ; CHECK-NEXT:    ret
+;
+; NORMV-LABEL: test19:
+; NORMV:       # %bb.0: # %bb
+; NORMV-NEXT:    addi.d $sp, $sp, -16
+; NORMV-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; NORMV-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; NORMV-NEXT:    ori $a0, $zero, 35
+; NORMV-NEXT:    lu32i.d $a0, 1
+; NORMV-NEXT:    maskeqz $fp, $a0, $a1
+; NORMV-NEXT:    st.d $fp, $a3, 0
+; NORMV-NEXT:    beqz $a2, .LBB22_2
+; NORMV-NEXT:  # %bb.1: # %bb2
+; NORMV-NEXT:    move $a0, $zero
+; NORMV-NEXT:    bl %plt(bar)
+; NORMV-NEXT:    move $fp, $a0
+; NORMV-NEXT:  .LBB22_2: # %bb7
+; NORMV-NEXT:    bl %plt(side_effect)
+; NORMV-NEXT:    addi.w $a0, $fp, 0
+; NORMV-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; NORMV-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; NORMV-NEXT:    addi.d $sp, $sp, 16
+; NORMV-NEXT:    ret
 bb:
   %sel = select i1 %c1, i64 4294967331, i64 0
   store i64 %sel, ptr %p, align 8
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/LoongArch/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/LoongArch/BUILD.gn
index 6e0efc548e3330..822c2ec8afc653 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/LoongArch/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/LoongArch/BUILD.gn
@@ -41,6 +41,7 @@ static_library("LLVMLoongArchCodeGen") {
     "LoongArchISelLowering.cpp",
     "LoongArchInstrInfo.cpp",
     "LoongArchMCInstLower.cpp",
+    "LoongArchOptWInstrs.cpp",
     "LoongArchRegisterInfo.cpp",
     "LoongArchSubtarget.cpp",
     "LoongArchTargetMachine.cpp",


