[llvm] 709e4ad - [LoongArch] Add codegen support for the bitwise binary operations and part of other operations

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Sun Jun 19 18:57:12 PDT 2022


Author: Weining Lu
Date: 2022-06-20T09:54:38+08:00
New Revision: 709e4ad0d9f09cc9b56c5b0c460f9419e0455a6b

URL: https://github.com/llvm/llvm-project/commit/709e4ad0d9f09cc9b56c5b0c460f9419e0455a6b
DIFF: https://github.com/llvm/llvm-project/commit/709e4ad0d9f09cc9b56c5b0c460f9419e0455a6b.diff

LOG: [LoongArch] Add codegen support for the bitwise binary operations and part of other operations

Reference:
https://llvm.org/docs/LangRef.html#bitwise-binary-operations
https://llvm.org/docs/LangRef.html#other-operations

Some "other operations" are implemented here because certain bitwise
binary operations depend on them. For example, on loongarch32, lowering
`shl` over i64 values requires `select`.

Differential Revision: https://reviews.llvm.org/D127203
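
As a minimal illustration (the added shl.ll test is the committed
reference; the function name below is only illustrative), the i64 shift
in this IR is a single instruction on loongarch64 but must be expanded
on loongarch32, where the SHL_PARTS lowering emits `select` nodes to
choose between the "shift amount < 32" and "shift amount >= 32" cases:

  define i64 @shl_i64(i64 %x, i64 %y) {
    %r = shl i64 %x, %y
    ret i64 %r
  }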

Added: 
    llvm/test/CodeGen/LoongArch/ir-instruction/and.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/ashr.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/icmp.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/lshr.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/or.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-dbl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-flt.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-int.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-dbl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-flt.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-int.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-dbl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-flt.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-int.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/shl.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/xor.ll
    llvm/test/CodeGen/LoongArch/shift-masked-shamt.ll

Modified: 
    llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
    llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchISelLowering.h
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchRegisterInfo.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
index f988b637a51a3..5b117d40e0a9c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -128,4 +128,50 @@ def : PatFprFpr<fmul, FMUL_S, FPR32>;
 def : PatFprFpr<fdiv, FDIV_S, FPR32>;
 def : PatFpr<fneg, FNEG_S, FPR32>;
 
+/// Setcc
+
+// Match non-signaling comparison
+
+// TODO: Change setcc to any_fsetcc after call is supported because
+// we need to call llvm.experimental.constrained.fcmp.f32 in the testcase.
+// See RISCV float-fcmp-strict.ll for reference.
+class PatFPSetcc<CondCode cc, LAInst CmpInst, RegisterClass RegTy>
+    : Pat<(setcc RegTy:$fj, RegTy:$fk, cc),
+          (MOVCF2GR (CmpInst RegTy:$fj, RegTy:$fk))>;
+// SETOGT/SETOGE/SETUGT/SETUGE will expand into SETOLT/SETOLE/SETULT/SETULE.
+def : PatFPSetcc<SETOEQ, FCMP_CEQ_S,  FPR32>;
+def : PatFPSetcc<SETOLT, FCMP_CLT_S,  FPR32>;
+def : PatFPSetcc<SETOLE, FCMP_CLE_S,  FPR32>;
+def : PatFPSetcc<SETONE, FCMP_CNE_S,  FPR32>;
+def : PatFPSetcc<SETO,   FCMP_COR_S,  FPR32>;
+def : PatFPSetcc<SETUEQ, FCMP_CUEQ_S, FPR32>;
+def : PatFPSetcc<SETULT, FCMP_CULT_S, FPR32>;
+def : PatFPSetcc<SETULE, FCMP_CULE_S, FPR32>;
+def : PatFPSetcc<SETUNE, FCMP_CUNE_S, FPR32>;
+def : PatFPSetcc<SETUO,  FCMP_CUN_S,  FPR32>;
+
+// TODO: Match signaling comparison strict_fsetccs with FCMP_S*_S instructions.
+
+/// Select
+
+def : Pat<(select GPR:$cc, FPR32:$fk, FPR32:$fj),
+          (FSEL_S FPR32:$fj, FPR32:$fk, (MOVGR2CF GPR:$cc))>;
+
+/// Selectcc
+
+class PatFPSelectcc<CondCode cc, LAInst CmpInst, LAInst SelInst,
+                    RegisterClass RegTy>
+    : Pat<(select (GRLenVT (setcc RegTy:$a, RegTy:$b, cc)), RegTy:$t, RegTy:$f),
+          (SelInst RegTy:$f, RegTy:$t, (CmpInst RegTy:$a, RegTy:$b))>;
+def : PatFPSelectcc<SETOEQ, FCMP_CEQ_S,  FSEL_S, FPR32>;
+def : PatFPSelectcc<SETOLT, FCMP_CLT_S,  FSEL_S, FPR32>;
+def : PatFPSelectcc<SETOLE, FCMP_CLE_S,  FSEL_S, FPR32>;
+def : PatFPSelectcc<SETONE, FCMP_CNE_S,  FSEL_S, FPR32>;
+def : PatFPSelectcc<SETO,   FCMP_COR_S,  FSEL_S, FPR32>;
+def : PatFPSelectcc<SETUEQ, FCMP_CUEQ_S, FSEL_S, FPR32>;
+def : PatFPSelectcc<SETULT, FCMP_CULT_S, FSEL_S, FPR32>;
+def : PatFPSelectcc<SETULE, FCMP_CULE_S, FSEL_S, FPR32>;
+def : PatFPSelectcc<SETUNE, FCMP_CUNE_S, FSEL_S, FPR32>;
+def : PatFPSelectcc<SETUO,  FCMP_CUN_S,  FSEL_S, FPR32>;
+
 } // Predicates = [HasBasicF]
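
A sketch of how the Setcc and Selectcc patterns above compose (the added
select-fpcc-flt.ll test is the committed reference; the function name is
illustrative): IR like the following should match
PatFPSelectcc<SETOEQ, FCMP_CEQ_S, FSEL_S, FPR32>, so the fcmp.ceq.s
result in an FCC register feeds fsel directly instead of taking a
movcf2gr round trip through a GPR:

  define float @select_fcmp_oeq(float %a, float %b, float %t, float %f) {
    %cmp = fcmp oeq float %a, %b
    %res = select i1 %cmp, float %t, float %f
    ret float %res
  }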

diff  --git a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
index d98272f67a4b6..07fa61f4c3616 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
@@ -145,4 +145,44 @@ def : PatFprFpr<fmul, FMUL_D, FPR64>;
 def : PatFprFpr<fdiv, FDIV_D, FPR64>;
 def : PatFpr<fneg, FNEG_D, FPR64>;
 
+/// Setcc
+
+// Match non-signaling comparison
+
+// TODO: Change setcc to any_fsetcc after call is supported because
+// we need to call llvm.experimental.constrained.fcmp.f64 in the testcase.
+// See RISCV float-fcmp-strict.ll for reference.
+
+// SETOGT/SETOGE/SETUGT/SETUGE will expand into SETOLT/SETOLE/SETULT/SETULE.
+def : PatFPSetcc<SETOEQ, FCMP_CEQ_D,  FPR64>;
+def : PatFPSetcc<SETOLT, FCMP_CLT_D,  FPR64>;
+def : PatFPSetcc<SETOLE, FCMP_CLE_D,  FPR64>;
+def : PatFPSetcc<SETONE, FCMP_CNE_D,  FPR64>;
+def : PatFPSetcc<SETO,   FCMP_COR_D,  FPR64>;
+def : PatFPSetcc<SETUEQ, FCMP_CUEQ_D, FPR64>;
+def : PatFPSetcc<SETULT, FCMP_CULT_D, FPR64>;
+def : PatFPSetcc<SETULE, FCMP_CULE_D, FPR64>;
+def : PatFPSetcc<SETUNE, FCMP_CUNE_D, FPR64>;
+def : PatFPSetcc<SETUO,  FCMP_CUN_D,  FPR64>;
+
+// TODO: Match signaling comparison strict_fsetccs with FCMP_S*_D instructions.
+
+/// Select
+
+def : Pat<(select GPR:$cc, FPR64:$fk, FPR64:$fj),
+          (FSEL_D FPR64:$fj, FPR64:$fk, (MOVGR2CF GPR:$cc))>;
+
+/// Selectcc
+
+def : PatFPSelectcc<SETOEQ, FCMP_CEQ_D,  FSEL_D, FPR64>;
+def : PatFPSelectcc<SETOLT, FCMP_CLT_D,  FSEL_D, FPR64>;
+def : PatFPSelectcc<SETOLE, FCMP_CLE_D,  FSEL_D, FPR64>;
+def : PatFPSelectcc<SETONE, FCMP_CNE_D,  FSEL_D, FPR64>;
+def : PatFPSelectcc<SETO,   FCMP_COR_D,  FSEL_D, FPR64>;
+def : PatFPSelectcc<SETUEQ, FCMP_CUEQ_D, FSEL_D, FPR64>;
+def : PatFPSelectcc<SETULT, FCMP_CULT_D, FSEL_D, FPR64>;
+def : PatFPSelectcc<SETULE, FCMP_CULE_D, FSEL_D, FPR64>;
+def : PatFPSelectcc<SETUNE, FCMP_CUNE_D, FSEL_D, FPR64>;
+def : PatFPSelectcc<SETUO,  FCMP_CUN_D,  FSEL_D, FPR64>;
+
 } // Predicates = [HasBasicD]

diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
index 2e5b3ead40d3e..a1fb676add074 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
@@ -13,6 +13,7 @@
 #include "LoongArchISelDAGToDAG.h"
 #include "MCTargetDesc/LoongArchMCTargetDesc.h"
 #include "MCTargetDesc/LoongArchMatInt.h"
+#include "llvm/Support/KnownBits.h"
 
 using namespace llvm;
 
@@ -64,6 +65,54 @@ void LoongArchDAGToDAGISel::Select(SDNode *Node) {
   // Select the default instruction.
   SelectCode(Node);
 }
+
+bool LoongArchDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
+                                            SDValue &ShAmt) {
+  // Shift instructions on LoongArch only read the lower 5 or 6 bits of the
+  // shift amount. If there is an AND on the shift amount, we can bypass it if
+  // it doesn't affect any of those bits.
+  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
+    const APInt &AndMask = N->getConstantOperandAPInt(1);
+
+    // Since the max shift amount is a power of 2 we can subtract 1 to make a
+    // mask that covers the bits needed to represent all shift amounts.
+    assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
+    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
+
+    if (ShMask.isSubsetOf(AndMask)) {
+      ShAmt = N.getOperand(0);
+      return true;
+    }
+
+    // SimplifyDemandedBits may have optimized the mask so try restoring any
+    // bits that are known zero.
+    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
+    if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
+      ShAmt = N.getOperand(0);
+      return true;
+    }
+  } else if (N.getOpcode() == ISD::SUB &&
+             isa<ConstantSDNode>(N.getOperand(0))) {
+    uint64_t Imm = N.getConstantOperandVal(0);
+    // If we are shifting by N-X where N == 0 mod ShiftWidth, then just shift
+    // by -X to generate a NEG instead of a SUB of a constant.
+    if (Imm != 0 && Imm % ShiftWidth == 0) {
+      SDLoc DL(N);
+      EVT VT = N.getValueType();
+      SDValue Zero =
+          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, LoongArch::R0, VT);
+      unsigned NegOpc = VT == MVT::i64 ? LoongArch::SUB_D : LoongArch::SUB_W;
+      MachineSDNode *Neg =
+          CurDAG->getMachineNode(NegOpc, DL, VT, Zero, N.getOperand(1));
+      ShAmt = SDValue(Neg, 0);
+      return true;
+    }
+  }
+
+  ShAmt = N;
+  return true;
+}
+
 // This pass converts a legalized DAG into a LoongArch-specific DAG, ready
 // for instruction scheduling.
 FunctionPass *llvm::createLoongArchISelDag(LoongArchTargetMachine &TM) {
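
A sketch of what selectShiftMask buys (the added shift-masked-shamt.ll
test is the committed reference; the function name is illustrative):
because the 32-bit shifts only read the low 5 bits of the shift amount,
the explicit mask below is redundant, and selection can bypass the AND.
On loongarch32 this should select to a single sll.w with no preceding
andi:

  define i32 @sll_redundant_mask(i32 %a, i32 %b) {
    ; The mask is subsumed by the hardware's implicit mod-32 behaviour.
    %mask = and i32 %b, 31
    %shl = shl i32 %a, %mask
    ret i32 %shl
  }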

diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
index 673172aeac5d4..f477129d933c4 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
@@ -38,6 +38,14 @@ class LoongArchDAGToDAGISel : public SelectionDAGISel {
 
   void Select(SDNode *Node) override;
 
+  bool selectShiftMask(SDValue N, unsigned ShiftWidth, SDValue &ShAmt);
+  bool selectShiftMaskGRLen(SDValue N, SDValue &ShAmt) {
+    return selectShiftMask(N, Subtarget->getGRLen(), ShAmt);
+  }
+  bool selectShiftMask32(SDValue N, SDValue &ShAmt) {
+    return selectShiftMask(N, 32, ShAmt);
+  }
+
 // Include the pieces autogenerated from the target description.
 #include "LoongArchGenDAGISel.inc"
 };

diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 714042137137a..bc043efe20ccf 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -18,6 +18,7 @@
 #include "LoongArchSubtarget.h"
 #include "LoongArchTargetMachine.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
 #include "llvm/Support/Debug.h"
 
 using namespace llvm;
@@ -37,6 +38,29 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     addRegisterClass(MVT::f64, &LoongArch::FPR64RegClass);
 
   // TODO: add necessary setOperationAction calls later.
+  setOperationAction(ISD::SHL_PARTS, GRLenVT, Custom);
+  setOperationAction(ISD::SRA_PARTS, GRLenVT, Custom);
+  setOperationAction(ISD::SRL_PARTS, GRLenVT, Custom);
+
+  if (Subtarget.is64Bit()) {
+    setOperationAction(ISD::SHL, MVT::i32, Custom);
+    setOperationAction(ISD::SRA, MVT::i32, Custom);
+    setOperationAction(ISD::SRL, MVT::i32, Custom);
+  }
+
+  static const ISD::CondCode FPCCToExpand[] = {ISD::SETOGT, ISD::SETOGE,
+                                               ISD::SETUGT, ISD::SETUGE};
+
+  if (Subtarget.hasBasicF()) {
+    setCondCodeAction(FPCCToExpand, MVT::f32, Expand);
+    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+  }
+  if (Subtarget.hasBasicD()) {
+    setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
+    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+  }
+
+  setOperationAction(ISD::SELECT_CC, GRLenVT, Expand);
 
   // Compute derived properties from the register classes.
   computeRegisterProperties(STI.getRegisterInfo());
@@ -50,6 +74,169 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   setMinFunctionAlignment(FunctionAlignment);
 }
 
+SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  switch (Op.getOpcode()) {
+  default:
+    report_fatal_error("unimplemented operand");
+  case ISD::SHL_PARTS:
+    return lowerShiftLeftParts(Op, DAG);
+  case ISD::SRA_PARTS:
+    return lowerShiftRightParts(Op, DAG, true);
+  case ISD::SRL_PARTS:
+    return lowerShiftRightParts(Op, DAG, false);
+  case ISD::SHL:
+  case ISD::SRA:
+  case ISD::SRL:
+    // This can be called for an i32 shift amount that needs to be promoted.
+    assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
+           "Unexpected custom legalisation");
+    return SDValue();
+  }
+}
+
+SDValue LoongArchTargetLowering::lowerShiftLeftParts(SDValue Op,
+                                                     SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  SDValue Lo = Op.getOperand(0);
+  SDValue Hi = Op.getOperand(1);
+  SDValue Shamt = Op.getOperand(2);
+  EVT VT = Lo.getValueType();
+
+  // if Shamt-GRLen < 0: // Shamt < GRLen
+  //   Lo = Lo << Shamt
+  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (GRLen-1 ^ Shamt))
+  // else:
+  //   Lo = 0
+  //   Hi = Lo << (Shamt-GRLen)
+
+  SDValue Zero = DAG.getConstant(0, DL, VT);
+  SDValue One = DAG.getConstant(1, DL, VT);
+  SDValue MinusGRLen = DAG.getConstant(-(int)Subtarget.getGRLen(), DL, VT);
+  SDValue GRLenMinus1 = DAG.getConstant(Subtarget.getGRLen() - 1, DL, VT);
+  SDValue ShamtMinusGRLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusGRLen);
+  SDValue GRLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, GRLenMinus1);
+
+  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
+  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
+  SDValue ShiftRightLo =
+      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, GRLenMinus1Shamt);
+  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
+  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusGRLen);
+
+  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusGRLen, Zero, ISD::SETLT);
+
+  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
+  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
+
+  SDValue Parts[2] = {Lo, Hi};
+  return DAG.getMergeValues(Parts, DL);
+}
+
+SDValue LoongArchTargetLowering::lowerShiftRightParts(SDValue Op,
+                                                      SelectionDAG &DAG,
+                                                      bool IsSRA) const {
+  SDLoc DL(Op);
+  SDValue Lo = Op.getOperand(0);
+  SDValue Hi = Op.getOperand(1);
+  SDValue Shamt = Op.getOperand(2);
+  EVT VT = Lo.getValueType();
+
+  // SRA expansion:
+  //   if Shamt-GRLen < 0: // Shamt < GRLen
+  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (Shamt ^ GRLen-1))
+  //     Hi = Hi >>s Shamt
+  //   else:
+  //     Lo = Hi >>s (Shamt-GRLen);
+  //     Hi = Hi >>s (GRLen-1)
+  //
+  // SRL expansion:
+  //   if Shamt-GRLen < 0: // Shamt < GRLen
+  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (Shamt ^ GRLen-1))
+  //     Hi = Hi >>u Shamt
+  //   else:
+  //     Lo = Hi >>u (Shamt-GRLen);
+  //     Hi = 0;
+
+  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
+
+  SDValue Zero = DAG.getConstant(0, DL, VT);
+  SDValue One = DAG.getConstant(1, DL, VT);
+  SDValue MinusGRLen = DAG.getConstant(-(int)Subtarget.getGRLen(), DL, VT);
+  SDValue GRLenMinus1 = DAG.getConstant(Subtarget.getGRLen() - 1, DL, VT);
+  SDValue ShamtMinusGRLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusGRLen);
+  SDValue GRLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, GRLenMinus1);
+
+  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
+  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
+  SDValue ShiftLeftHi =
+      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, GRLenMinus1Shamt);
+  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
+  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
+  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusGRLen);
+  SDValue HiFalse =
+      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, GRLenMinus1) : Zero;
+
+  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusGRLen, Zero, ISD::SETLT);
+
+  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
+  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
+
+  SDValue Parts[2] = {Lo, Hi};
+  return DAG.getMergeValues(Parts, DL);
+}
+
+// Returns the opcode of the target-specific SDNode that implements the 32-bit
+// form of the given Opcode.
+static LoongArchISD::NodeType getLoongArchWOpcode(unsigned Opcode) {
+  switch (Opcode) {
+  default:
+    llvm_unreachable("Unexpected opcode");
+  case ISD::SHL:
+    return LoongArchISD::SLL_W;
+  case ISD::SRA:
+    return LoongArchISD::SRA_W;
+  case ISD::SRL:
+    return LoongArchISD::SRL_W;
+  }
+}
+
+// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
+// node. Because i8/i16/i32 isn't a legal type for LA64, these operations would
+// otherwise be promoted to i64, making it difficult to select the
+// SLL_W/.../*W later on, because the fact that the operation was originally of
+// type i8/i16/i32 is lost.
+static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
+                                   unsigned ExtOpc = ISD::ANY_EXTEND) {
+  SDLoc DL(N);
+  LoongArchISD::NodeType WOpcode = getLoongArchWOpcode(N->getOpcode());
+  SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
+  SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
+  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
+  // ReplaceNodeResults requires we maintain the same type for the return value.
+  return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
+}
+
+void LoongArchTargetLowering::ReplaceNodeResults(
+    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
+  SDLoc DL(N);
+  switch (N->getOpcode()) {
+  default:
+    llvm_unreachable("Don't know how to legalize this operation");
+  case ISD::SHL:
+  case ISD::SRA:
+  case ISD::SRL:
+    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+           "Unexpected custom legalisation");
+    if (N->getOperand(1).getOpcode() != ISD::Constant) {
+      Results.push_back(customLegalizeToWOp(N, DAG));
+      break;
+    }
+    break;
+  }
+}
+
 const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
   switch ((LoongArchISD::NodeType)Opcode) {
   case LoongArchISD::FIRST_NUMBER:
@@ -61,6 +248,9 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
 
     // TODO: Add more target-dependent nodes later.
     NODE_NAME_CASE(RET)
+    NODE_NAME_CASE(SLL_W)
+    NODE_NAME_CASE(SRA_W)
+    NODE_NAME_CASE(SRL_W)
   }
 #undef NODE_NAME_CASE
   return nullptr;
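
One step in the parts lowerings above is worth spelling out. For
0 <= Shamt < GRLen, Shamt's set bits are a subset of GRLen-1's, so
GRLen-1 ^ Shamt == GRLen-1 - Shamt. On LA32 that gives

  (Lo >>u 1) >>u (31 ^ Shamt)  ==  Lo >>u (32 - Shamt)

computed with two always-in-range shifts, whereas shifting directly by
32 - Shamt would be out of range when Shamt is 0. For example, with
Shamt = 3: 31 ^ 3 = 28, and (Lo >>u 1) >>u 28 == Lo >>u 29
== Lo >>u (32 - 3).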

diff  --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 964d50f55eed7..5693bc58ed63c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -28,6 +28,11 @@ enum NodeType : unsigned {
 
   // TODO: add more LoongArchISDs
   RET,
+  // 32-bit shifts, directly matching the semantics of the named LoongArch
+  // instructions.
+  SLL_W,
+  SRA_W,
+  SRL_W,
 
 };
 } // namespace LoongArchISD
@@ -41,6 +46,11 @@ class LoongArchTargetLowering : public TargetLowering {
 
   const LoongArchSubtarget &getSubtarget() const { return Subtarget; }
 
+  // Provide custom lowering hooks for some operations.
+  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+                          SelectionDAG &DAG) const override;
+
   // This method returns the name of a target specific DAG node.
   const char *getTargetNodeName(unsigned Opcode) const override;
 
@@ -71,6 +81,9 @@ class LoongArchTargetLowering : public TargetLowering {
   void analyzeOutputArgs(CCState &CCInfo,
                          const SmallVectorImpl<ISD::OutputArg> &Outs,
                          LoongArchCCAssignFn Fn) const;
+
+  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
 };
 
 } // end namespace llvm

diff  --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
index 36a1eeed13dc0..146ef53befd51 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
@@ -33,6 +33,17 @@ void LoongArchInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     return;
   }
 
-  // TODO: Now, we only support GPR->GPR copies.
-  llvm_unreachable("LoongArch didn't implement copyPhysReg");
+  // FPR->FPR copies.
+  unsigned Opc;
+  if (LoongArch::FPR32RegClass.contains(DstReg, SrcReg)) {
+    Opc = LoongArch::FMOV_S;
+  } else if (LoongArch::FPR64RegClass.contains(DstReg, SrcReg)) {
+    Opc = LoongArch::FMOV_D;
+  } else {
+    // TODO: support other copies.
+    llvm_unreachable("Impossible reg-to-reg copy");
+  }
+
+  BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
+      .addReg(SrcReg, getKillRegState(KillSrc));
 }

diff  --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 265651cdc3f93..dda6913f8c5ad 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -14,10 +14,18 @@
 // LoongArch specific DAG Nodes.
 //===----------------------------------------------------------------------===//
 
+// Target-dependent type requirements.
+def SDT_LoongArchIntBinOpW : SDTypeProfile<1, 2, [
+  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>
+]>;
+
 // TODO: Add LoongArch specific DAG Nodes
 // Target-dependent nodes.
 def loongarch_ret : SDNode<"LoongArchISD::RET", SDTNone,
                            [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+def loongarch_sll_w : SDNode<"LoongArchISD::SLL_W", SDT_LoongArchIntBinOpW>;
+def loongarch_sra_w : SDNode<"LoongArchISD::SRA_W", SDT_LoongArchIntBinOpW>;
+def loongarch_srl_w : SDNode<"LoongArchISD::SRL_W", SDT_LoongArchIntBinOpW>;
 
 //===----------------------------------------------------------------------===//
 // Operand and SDNode transformation definitions.
@@ -52,11 +60,11 @@ def uimm3 : Operand<GRLenVT> {
   let ParserMatchClass = UImmAsmOperand<3>;
 }
 
-def uimm5 : Operand<GRLenVT> {
+def uimm5 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<5>(Imm);}]> {
   let ParserMatchClass = UImmAsmOperand<5>;
 }
 
-def uimm6 : Operand<GRLenVT> {
+def uimm6 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<6>(Imm);}]> {
   let ParserMatchClass = UImmAsmOperand<6>;
 }
 
@@ -64,7 +72,7 @@ def uimm8 : Operand<GRLenVT> {
   let ParserMatchClass = UImmAsmOperand<8>;
 }
 
-def uimm12 : Operand<GRLenVT> {
+def uimm12 : Operand<GRLenVT>, ImmLeaf<GRLenVT, [{return isUInt<12>(Imm);}]> {
   let ParserMatchClass = UImmAsmOperand<12>;
 }
 
@@ -115,6 +123,18 @@ def simm26_lsl2 : Operand<GRLenVT> {
   let DecoderMethod = "decodeSImmOperand<26, 2>";
 }
 
+// Standalone (codegen-only) immleaf patterns.
+
+// A 12-bit signed immediate plus one where the imm range will be [-2047, 2048].
+def simm12_plus1 : ImmLeaf<GRLenVT,
+  [{return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]>;
+
+// Return the negation of an immediate value.
+def NegImm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N),
+                                   N->getValueType(0));
+}]>;
+
 //===----------------------------------------------------------------------===//
 // Instruction Formats
 //===----------------------------------------------------------------------===//
@@ -511,6 +531,21 @@ class PatGprImm_32<SDPatternOperator OpNode, LAInst Inst, Operand ImmOpnd>
 
 /// Simple arithmetic operations
 
+// Match both a plain shift and one where the shift amount is masked (this is
+// typically introduced when the legalizer promotes the shift amount and
+// zero-extends it). For LoongArch, the mask is unnecessary as shifts in the
+// base ISA only read the least significant 5 bits (LA32) or 6 bits (LA64).
+def shiftMaskGRLen
+    : ComplexPattern<GRLenVT, 1, "selectShiftMaskGRLen", [], [], 0>;
+def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>;
+
+class shiftop<SDPatternOperator operator>
+    : PatFrag<(ops node:$val, node:$count),
+              (operator node:$val, (GRLenVT (shiftMaskGRLen node:$count)))>;
+class shiftopw<SDPatternOperator operator>
+    : PatFrag<(ops node:$val, node:$count),
+              (operator node:$val, (i64 (shiftMask32 node:$count)))>;
+
 let Predicates = [IsLA32] in {
 def : PatGprGpr<add, ADD_W>;
 def : PatGprImm<add, ADDI_W, simm12>;
@@ -527,18 +562,84 @@ def : PatGprGpr_32<sub, SUB_W>;
 } // Predicates = [IsLA64]
 
 def : PatGprGpr<and, AND>;
+def : PatGprImm<and, ANDI, uimm12>;
 def : PatGprGpr<or, OR>;
+def : PatGprImm<or, ORI, uimm12>;
 def : PatGprGpr<xor, XOR>;
+def : PatGprImm<xor, XORI, uimm12>;
+
+/// Shift
+
+let Predicates = [IsLA32] in {
+def : PatGprGpr<shiftop<shl>, SLL_W>;
+def : PatGprGpr<shiftop<sra>, SRA_W>;
+def : PatGprGpr<shiftop<srl>, SRL_W>;
+def : PatGprImm<shl, SLLI_W, uimm5>;
+def : PatGprImm<sra, SRAI_W, uimm5>;
+def : PatGprImm<srl, SRLI_W, uimm5>;
+} // Predicates = [IsLA32]
+
+let Predicates = [IsLA64] in {
+def : PatGprGpr<shiftopw<loongarch_sll_w>, SLL_W>;
+def : PatGprGpr<shiftopw<loongarch_sra_w>, SRA_W>;
+def : PatGprGpr<shiftopw<loongarch_srl_w>, SRL_W>;
+def : PatGprGpr<shiftop<shl>, SLL_D>;
+def : PatGprGpr<shiftop<sra>, SRA_D>;
+def : PatGprGpr<shiftop<srl>, SRL_D>;
+def : PatGprImm<shl, SLLI_D, uimm6>;
+def : PatGprImm<sra, SRAI_D, uimm6>;
+def : PatGprImm<srl, SRLI_D, uimm6>;
+} // Predicates = [IsLA64]
 
 /// sext and zext
 
+def : Pat<(sext_inreg GPR:$rj, i8), (EXT_W_B GPR:$rj)>;
+def : Pat<(sext_inreg GPR:$rj, i16), (EXT_W_H GPR:$rj)>;
+
 let Predicates = [IsLA64] in {
 def : Pat<(sext_inreg GPR:$rj, i32), (ADDI_W GPR:$rj, 0)>;
 } // Predicates = [IsLA64]
 
 /// Setcc
 
+def : PatGprGpr<setlt, SLT>;
+def : PatGprImm<setlt, SLTI, simm12>;
 def : PatGprGpr<setult, SLTU>;
+def : PatGprImm<setult, SLTUI, simm12>;
+
+// Define pattern expansions for setcc operations that aren't directly
+// handled by a LoongArch instruction.
+def : Pat<(seteq GPR:$rj, 0), (SLTUI GPR:$rj, 1)>;
+def : Pat<(seteq GPR:$rj, GPR:$rk), (SLTUI (XOR GPR:$rj, GPR:$rk), 1)>;
+let Predicates = [IsLA32] in {
+def : Pat<(seteq GPR:$rj, simm12_plus1:$imm12),
+          (SLTUI (ADDI_W GPR:$rj, (NegImm simm12_plus1:$imm12)), 1)>;
+} // Predicates = [IsLA32]
+let Predicates = [IsLA64] in {
+def : Pat<(seteq GPR:$rj, simm12_plus1:$imm12),
+          (SLTUI (ADDI_D GPR:$rj, (NegImm simm12_plus1:$imm12)), 1)>;
+} // Predicates = [IsLA64]
+def : Pat<(setne GPR:$rj, 0), (SLTU R0, GPR:$rj)>;
+def : Pat<(setne GPR:$rj, GPR:$rk), (SLTU R0, (XOR GPR:$rj, GPR:$rk))>;
+let Predicates = [IsLA32] in {
+def : Pat<(setne GPR:$rj, simm12_plus1:$imm12),
+          (SLTU R0, (ADDI_W GPR:$rj, (NegImm simm12_plus1:$imm12)))>;
+} // Predicates = [IsLA32]
+let Predicates = [IsLA64] in {
+def : Pat<(setne GPR:$rj, simm12_plus1:$imm12),
+          (SLTU R0, (ADDI_D GPR:$rj, (NegImm simm12_plus1:$imm12)))>;
+} // Predicates = [IsLA64]
+def : Pat<(setugt GPR:$rj, GPR:$rk), (SLTU GPR:$rk, GPR:$rj)>;
+def : Pat<(setuge GPR:$rj, GPR:$rk), (XORI (SLTU GPR:$rj, GPR:$rk), 1)>;
+def : Pat<(setule GPR:$rj, GPR:$rk), (XORI (SLTU GPR:$rk, GPR:$rj), 1)>;
+def : Pat<(setgt GPR:$rj, GPR:$rk), (SLT GPR:$rk, GPR:$rj)>;
+def : Pat<(setge GPR:$rj, GPR:$rk), (XORI (SLT GPR:$rj, GPR:$rk), 1)>;
+def : Pat<(setle GPR:$rj, GPR:$rk), (XORI (SLT GPR:$rk, GPR:$rj), 1)>;
+
+/// Select
+
+def : Pat<(select GPR:$cond, GPR:$t, GPR:$f),
+          (OR (MASKEQZ GPR:$t, GPR:$cond), (MASKNEZ GPR:$f, GPR:$cond))>;
 
 /// Branches and jumps
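
A sketch of how simm12_plus1 and NegImm cooperate in the seteq/setne
expansions above (the added icmp.ll test is the committed reference; the
function name is illustrative). The immediate range is [-2047, 2048]
rather than plain simm12 because it is the negated constant that must
fit the addi immediate field:

  define i1 @icmp_eq_2048(i32 %a) {
    ; 2048 itself does not fit simm12, but its negation -2048 does, so
    ; on loongarch32 this should select to:
    ;   addi.w $a0, $a0, -2048
    ;   sltui  $a0, $a0, 1
    %res = icmp eq i32 %a, 2048
    ret i1 %res
  }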
 

diff  --git a/llvm/lib/Target/LoongArch/LoongArchRegisterInfo.td b/llvm/lib/Target/LoongArch/LoongArchRegisterInfo.td
index 02c9792bff319..2d5ad99f61560 100644
--- a/llvm/lib/Target/LoongArch/LoongArchRegisterInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchRegisterInfo.td
@@ -148,7 +148,9 @@ def FPR64 : RegisterClass<"LoongArch", [f64], 64, (sequence "F%u_64", 0, 31)>;
 foreach I = 0-7 in
 def FCC#I : LoongArchReg<I, "fcc"#I>;
 
-def CFR : RegisterClass<"LoongArch", [i32], 32, (sequence "FCC%u", 0, 7)>;
+def CFR : RegisterClass<"LoongArch", [GRLenVT], 32, (sequence "FCC%u", 0, 7)> {
+  let RegInfos = GRLenRI;
+}
 
 // Control and status registers
 

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/and.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/and.ll
new file mode 100644
index 0000000000000..e5c9da58c7570
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/and.ll
@@ -0,0 +1,266 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'and' LLVM IR: https://llvm.org/docs/LangRef.html#and-instruction
+
+define i1 @and_i1(i1 %a, i1 %b) {
+; LA32-LABEL: and_i1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i1 %a, %b
+  ret i1 %r
+}
+
+define i8 @and_i8(i8 %a, i8 %b) {
+; LA32-LABEL: and_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i8 %a, %b
+  ret i8 %r
+}
+
+define i16 @and_i16(i16 %a, i16 %b) {
+; LA32-LABEL: and_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i16 %a, %b
+  ret i16 %r
+}
+
+define i32 @and_i32(i32 %a, i32 %b) {
+; LA32-LABEL: and_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i32 %a, %b
+  ret i32 %r
+}
+
+define i64 @and_i64(i64 %a, i64 %b) {
+; LA32-LABEL: and_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    and $a0, $a0, $a2
+; LA32-NEXT:    and $a1, $a1, $a3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i64 %a, %b
+  ret i64 %r
+}
+
+define i1 @and_i1_0(i1 %b) {
+; LA32-LABEL: and_i1_0:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    move $a0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i1_0:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    move $a0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i1 4, %b
+  ret i1 %r
+}
+
+define i1 @and_i1_5(i1 %b) {
+; LA32-LABEL: and_i1_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i1_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i1 5, %b
+  ret i1 %r
+}
+
+define i8 @and_i8_5(i8 %b) {
+; LA32-LABEL: and_i8_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    andi $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i8_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    andi $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i8 5, %b
+  ret i8 %r
+}
+
+define i8 @and_i8_257(i8 %b) {
+; LA32-LABEL: and_i8_257:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    andi $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i8_257:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i8 257, %b
+  ret i8 %r
+}
+
+define i16 @and_i16_5(i16 %b) {
+; LA32-LABEL: and_i16_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    andi $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i16_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    andi $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i16 5, %b
+  ret i16 %r
+}
+
+define i16 @and_i16_0x1000(i16 %b) {
+; LA32-LABEL: and_i16_0x1000:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    lu12i.w $a1, 1
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i16_0x1000:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    lu12i.w $a1, 1
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i16 4096, %b
+  ret i16 %r
+}
+
+define i16 @and_i16_0x10001(i16 %b) {
+; LA32-LABEL: and_i16_0x10001:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    andi $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i16_0x10001:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i16 65537, %b
+  ret i16 %r
+}
+
+define i32 @and_i32_5(i32 %b) {
+; LA32-LABEL: and_i32_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    andi $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i32_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    andi $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i32 5, %b
+  ret i32 %r
+}
+
+define i32 @and_i32_0x1000(i32 %b) {
+; LA32-LABEL: and_i32_0x1000:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    lu12i.w $a1, 1
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i32_0x1000:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    lu12i.w $a1, 1
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i32 4096, %b
+  ret i32 %r
+}
+
+define i32 @and_i32_0x100000001(i32 %b) {
+; LA32-LABEL: and_i32_0x100000001:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    andi $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i32_0x100000001:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i32 4294967297, %b
+  ret i32 %r
+}
+
+define i64 @and_i64_5(i64 %b) {
+; LA32-LABEL: and_i64_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    andi $a0, $a0, 5
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i64_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    andi $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i64 5, %b
+  ret i64 %r
+}
+
+define i64 @and_i64_0x1000(i64 %b) {
+; LA32-LABEL: and_i64_0x1000:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    lu12i.w $a1, 1
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    move $a1, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: and_i64_0x1000:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    lu12i.w $a1, 1
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = and i64 4096, %b
+  ret i64 %r
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/ashr.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/ashr.ll
new file mode 100644
index 0000000000000..1b7e8085185a0
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/ashr.ll
@@ -0,0 +1,168 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'ashr' LLVM IR: https://llvm.org/docs/LangRef.html#ashr-instruction
+
+define i1 @ashr_i1(i1 %x, i1 %y) {
+; LA32-LABEL: ashr_i1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i1 %x, %y
+  ret i1 %ashr
+}
+
+define i8 @ashr_i8(i8 %x, i8 %y) {
+; LA32-LABEL: ashr_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ext.w.b $a0, $a0
+; LA32-NEXT:    sra.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ext.w.b $a0, $a0
+; LA64-NEXT:    sra.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i8 %x, %y
+  ret i8 %ashr
+}
+
+define i16 @ashr_i16(i16 %x, i16 %y) {
+; LA32-LABEL: ashr_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ext.w.h $a0, $a0
+; LA32-NEXT:    sra.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ext.w.h $a0, $a0
+; LA64-NEXT:    sra.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i16 %x, %y
+  ret i16 %ashr
+}
+
+define i32 @ashr_i32(i32 %x, i32 %y) {
+; LA32-LABEL: ashr_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sra.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sra.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i32 %x, %y
+  ret i32 %ashr
+}
+
+define i64 @ashr_i64(i64 %x, i64 %y) {
+; LA32-LABEL: ashr_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    srai.w $a3, $a1, 31
+; LA32-NEXT:    addi.w $a4, $a2, -32
+; LA32-NEXT:    slti $a5, $a4, 0
+; LA32-NEXT:    masknez $a3, $a3, $a5
+; LA32-NEXT:    sra.w $a6, $a1, $a2
+; LA32-NEXT:    maskeqz $a6, $a6, $a5
+; LA32-NEXT:    or $a3, $a6, $a3
+; LA32-NEXT:    srl.w $a0, $a0, $a2
+; LA32-NEXT:    xori $a2, $a2, 31
+; LA32-NEXT:    slli.w $a6, $a1, 1
+; LA32-NEXT:    sll.w $a2, $a6, $a2
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    sra.w $a1, $a1, $a4
+; LA32-NEXT:    maskeqz $a0, $a0, $a5
+; LA32-NEXT:    masknez $a1, $a1, $a5
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    move $a1, $a3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sra.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i64 %x, %y
+  ret i64 %ashr
+}
+
+define i1 @ashr_i1_3(i1 %x) {
+; LA32-LABEL: ashr_i1_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i1_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i1 %x, 3
+  ret i1 %ashr
+}
+
+define i8 @ashr_i8_3(i8 %x) {
+; LA32-LABEL: ashr_i8_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ext.w.b $a0, $a0
+; LA32-NEXT:    srai.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i8_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ext.w.b $a0, $a0
+; LA64-NEXT:    srai.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i8 %x, 3
+  ret i8 %ashr
+}
+
+define i16 @ashr_i16_3(i16 %x) {
+; LA32-LABEL: ashr_i16_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ext.w.h $a0, $a0
+; LA32-NEXT:    srai.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i16_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ext.w.h $a0, $a0
+; LA64-NEXT:    srai.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i16 %x, 3
+  ret i16 %ashr
+}
+
+define i32 @ashr_i32_3(i32 %x) {
+; LA32-LABEL: ashr_i32_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    srai.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i32_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.w $a0, $a0, 0
+; LA64-NEXT:    srai.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i32 %x, 3
+  ret i32 %ashr
+}
+
+define i64 @ashr_i64_3(i64 %x) {
+; LA32-LABEL: ashr_i64_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    srli.w $a0, $a0, 3
+; LA32-NEXT:    slli.w $a2, $a1, 29
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    srai.w $a1, $a1, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: ashr_i64_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    srai.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %ashr = ashr i64 %x, 3
+  ret i64 %ashr
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
new file mode 100644
index 0000000000000..bb35405abc018
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
@@ -0,0 +1,257 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+;; Test the 'fcmp' LLVM IR: https://llvm.org/docs/LangRef.html#fcmp-instruction
+;; over double values.
+
+define i1 @fcmp_false(double %a, double %b) {
+; LA32-LABEL: fcmp_false:
+; LA32:       # %bb.0:
+; LA32-NEXT:    move $a0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_false:
+; LA64:       # %bb.0:
+; LA64-NEXT:    move $a0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp false double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_oeq(double %a, double %b) {
+; LA32-LABEL: fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oeq double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ogt(double %a, double %b) {
+; LA32-LABEL: fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ogt double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_oge(double %a, double %b) {
+; LA32-LABEL: fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oge double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_olt(double %a, double %b) {
+; LA32-LABEL: fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp olt double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ole(double %a, double %b) {
+; LA32-LABEL: fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ole double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_one(double %a, double %b) {
+; LA32-LABEL: fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp one double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ord(double %a, double %b) {
+; LA32-LABEL: fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ord double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ueq(double %a, double %b) {
+; LA32-LABEL: fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ueq double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ugt(double %a, double %b) {
+; LA32-LABEL: fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ugt double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_uge(double %a, double %b) {
+; LA32-LABEL: fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uge double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ult(double %a, double %b) {
+; LA32-LABEL: fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ult double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ule(double %a, double %b) {
+; LA32-LABEL: fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ule double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_une(double %a, double %b) {
+; LA32-LABEL: fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp une double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_uno(double %a, double %b) {
+; LA32-LABEL: fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uno double %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_true(double %a, double %b) {
+; LA32-LABEL: fcmp_true:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ori $a0, $zero, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_true:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ori $a0, $zero, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp true double %a, %b
+  ret i1 %cmp
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
new file mode 100644
index 0000000000000..33bdd0b50bd49
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
@@ -0,0 +1,257 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
+
+;; Test the 'fcmp' LLVM IR: https://llvm.org/docs/LangRef.html#fcmp-instruction
+;; over float values.
+
+define i1 @fcmp_false(float %a, float %b) {
+; LA32-LABEL: fcmp_false:
+; LA32:       # %bb.0:
+; LA32-NEXT:    move $a0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_false:
+; LA64:       # %bb.0:
+; LA64-NEXT:    move $a0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp false float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_oeq(float %a, float %b) {
+; LA32-LABEL: fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oeq float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ogt(float %a, float %b) {
+; LA32-LABEL: fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ogt float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_oge(float %a, float %b) {
+; LA32-LABEL: fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oge float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_olt(float %a, float %b) {
+; LA32-LABEL: fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp olt float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ole(float %a, float %b) {
+; LA32-LABEL: fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ole float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_one(float %a, float %b) {
+; LA32-LABEL: fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp one float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ord(float %a, float %b) {
+; LA32-LABEL: fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ord float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ueq(float %a, float %b) {
+; LA32-LABEL: fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ueq float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ugt(float %a, float %b) {
+; LA32-LABEL: fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ugt float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_uge(float %a, float %b) {
+; LA32-LABEL: fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uge float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ult(float %a, float %b) {
+; LA32-LABEL: fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ult float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_ule(float %a, float %b) {
+; LA32-LABEL: fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ule float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_une(float %a, float %b) {
+; LA32-LABEL: fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp une float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_uno(float %a, float %b) {
+; LA32-LABEL: fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uno float %a, %b
+  ret i1 %cmp
+}
+
+define i1 @fcmp_true(float %a, float %b) {
+; LA32-LABEL: fcmp_true:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ori $a0, $zero, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_true:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ori $a0, $zero, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp true float %a, %b
+  ret i1 %cmp
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/icmp.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/icmp.ll
new file mode 100644
index 0000000000000..947886e6b9dc5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/icmp.ll
@@ -0,0 +1,244 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'icmp' LLVM IR: https://llvm.org/docs/LangRef.html#icmp-instruction
+
+define i1 @icmp_eq(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_eq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    sltui $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_eq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    sltui $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp eq i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_ne(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_ne:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    sltu $a0, $zero, $a0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_ne:
+; LA64:       # %bb.0:
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    sltu $a0, $zero, $a0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp ne i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_ugt(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a1, $a0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a1, $a0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp ugt i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_uge(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a0, $a1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a0, $a1
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp uge i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_ult(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp ult i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_ule(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a1, $a0
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a1, $a0
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp ule i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_sgt(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_sgt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_sgt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp sgt i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_sge(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_sge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a0, $a1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_sge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a0, $a1
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp sge i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_slt(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_slt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_slt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp slt i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_sle(i32 signext %a, i32 signext %b) {
+; LA32-LABEL: icmp_sle:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_sle:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp sle i32 %a, %b
+  ret i1 %res
+}
+
+define i1 @icmp_slt_3(i32 signext %a) {
+; LA32-LABEL: icmp_slt_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slti $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_slt_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slti $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp slt i32 %a, 3
+  ret i1 %res
+}
+
+define i1 @icmp_ult_3(i32 signext %a) {
+; LA32-LABEL: icmp_ult_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltui $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_ult_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltui $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp ult i32 %a, 3
+  ret i1 %res
+}
+
+define i1 @icmp_eq_0(i32 signext %a) {
+; LA32-LABEL: icmp_eq_0:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltui $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_eq_0:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltui $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp eq i32 %a, 0
+  ret i1 %res
+}
+
+define i1 @icmp_eq_3(i32 signext %a) {
+; LA32-LABEL: icmp_eq_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, -3
+; LA32-NEXT:    sltui $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_eq_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, -3
+; LA64-NEXT:    sltui $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp eq i32 %a, 3
+  ret i1 %res
+}
+
+define i1 @icmp_ne_0(i32 signext %a) {
+; LA32-LABEL: icmp_ne_0:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $zero, $a0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_ne_0:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $zero, $a0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp ne i32 %a, 0
+  ret i1 %res
+}
+
+define i1 @icmp_ne_3(i32 signext %a) {
+; LA32-LABEL: icmp_ne_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $a0, -3
+; LA32-NEXT:    sltu $a0, $zero, $a0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: icmp_ne_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $a0, -3
+; LA64-NEXT:    sltu $a0, $zero, $a0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = icmp ne i32 %a, 3
+  ret i1 %res
+}
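
For readers skimming the checks above: LoongArch only has slt/sltu (plus their
immediate forms), so the remaining integer predicates are derived from a few
identities. As an informal gloss of the sequences in this file, not additional
test content:

  eq(a, b)   ->  xor t, a, b;  sltui r, t, 1      ; t <u 1, i.e. t == 0
  ne(a, b)   ->  xor t, a, b;  sltu  r, zero, t   ; 0 <u t
  ugt(a, b)  ->  sltu r, b, a                     ; operands swapped
  uge(a, b)  ->  sltu t, a, b;  xori r, t, 1      ; logical negation of ult

The signed variants follow the same shape with slt/slti.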

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/lshr.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/lshr.ll
new file mode 100644
index 0000000000000..28081ae501207
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/lshr.ll
@@ -0,0 +1,175 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'lshr' LLVM IR instruction: https://llvm.org/docs/LangRef.html#lshr-instruction
+
+define i1 @lshr_i1(i1 %x, i1 %y) {
+; LA32-LABEL: lshr_i1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i1 %x, %y
+  ret i1 %lshr
+}
+
+define i8 @lshr_i8(i8 %x, i8 %y) {
+; LA32-LABEL: lshr_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a0, $a0, 255
+; LA32-NEXT:    srl.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a0, $a0, 255
+; LA64-NEXT:    srl.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i8 %x, %y
+  ret i8 %lshr
+}
+
+define i16 @lshr_i16(i16 %x, i16 %y) {
+; LA32-LABEL: lshr_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a2, 15
+; LA32-NEXT:    ori $a2, $a2, 4095
+; LA32-NEXT:    and $a0, $a0, $a2
+; LA32-NEXT:    srl.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    and $a0, $a0, $a2
+; LA64-NEXT:    srl.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i16 %x, %y
+  ret i16 %lshr
+}
+
+define i32 @lshr_i32(i32 %x, i32 %y) {
+; LA32-LABEL: lshr_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    srl.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    srl.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i32 %x, %y
+  ret i32 %lshr
+}
+
+define i64 @lshr_i64(i64 %x, i64 %y) {
+; LA32-LABEL: lshr_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xori $a3, $a2, 31
+; LA32-NEXT:    slli.w $a4, $a1, 1
+; LA32-NEXT:    sll.w $a3, $a4, $a3
+; LA32-NEXT:    srl.w $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a3
+; LA32-NEXT:    addi.w $a3, $a2, -32
+; LA32-NEXT:    slti $a4, $a3, 0
+; LA32-NEXT:    maskeqz $a0, $a0, $a4
+; LA32-NEXT:    srl.w $a5, $a1, $a3
+; LA32-NEXT:    masknez $a4, $a5, $a4
+; LA32-NEXT:    or $a0, $a0, $a4
+; LA32-NEXT:    srl.w $a1, $a1, $a2
+; LA32-NEXT:    srai.w $a2, $a3, 31
+; LA32-NEXT:    and $a1, $a2, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    srl.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i64 %x, %y
+  ret i64 %lshr
+}
+
+define i1 @lshr_i1_3(i1 %x) {
+; LA32-LABEL: lshr_i1_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i1_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i1 %x, 3
+  ret i1 %lshr
+}
+
+define i8 @lshr_i8_3(i8 %x) {
+; LA32-LABEL: lshr_i8_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a0, $a0, 248
+; LA32-NEXT:    srli.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i8_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a0, $a0, 248
+; LA64-NEXT:    srli.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i8 %x, 3
+  ret i8 %lshr
+}
+
+define i16 @lshr_i16_3(i16 %x) {
+; LA32-LABEL: lshr_i16_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    lu12i.w $a1, 15
+; LA32-NEXT:    ori $a1, $a1, 4088
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    srli.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i16_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu12i.w $a1, 15
+; LA64-NEXT:    ori $a1, $a1, 4088
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    srli.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i16 %x, 3
+  ret i16 %lshr
+}
+
+define i32 @lshr_i32_3(i32 %x) {
+; LA32-LABEL: lshr_i32_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    srli.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i32_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.w $a1, $zero, -8
+; LA64-NEXT:    lu32i.d $a1, 0
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    srli.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i32 %x, 3
+  ret i32 %lshr
+}
+
+define i64 @lshr_i64_3(i64 %x) {
+; LA32-LABEL: lshr_i64_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    srli.w $a0, $a0, 3
+; LA32-NEXT:    slli.w $a2, $a1, 29
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    srli.w $a1, $a1, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: lshr_i64_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    srli.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %lshr = lshr i64 %x, 3
+  ret i64 %lshr
+}
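
The LA32 expansion of the variable i64 shift is easier to follow as a formula.
For a shift amount sa in [0, 64), the sequence above effectively computes (a
sketch of the legalization, with maskeqz/masknez/srai.w acting as the select):

  lo' = sa < 32 ? (lo >>u sa) | ((hi << 1) << (31 - sa)) : hi >>u (sa - 32)
  hi' = sa < 32 ? (hi >>u sa) : 0

The ((hi << 1) << (31 - sa)) form stands in for hi << (32 - sa) so that no
single shift amount can reach 32 when sa == 0; the xori with 31 is how
31 - sa is computed for sa < 32.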

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/or.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/or.ll
new file mode 100644
index 0000000000000..37006573244b5
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/or.ll
@@ -0,0 +1,264 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'or' LLVM IR instruction: https://llvm.org/docs/LangRef.html#or-instruction
+
+define i1 @or_i1(i1 %a, i1 %b) {
+; LA32-LABEL: or_i1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i1 %a, %b
+  ret i1 %r
+}
+
+define i8 @or_i8(i8 %a, i8 %b) {
+; LA32-LABEL: or_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i8 %a, %b
+  ret i8 %r
+}
+
+define i16 @or_i16(i16 %a, i16 %b) {
+; LA32-LABEL: or_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i16 %a, %b
+  ret i16 %r
+}
+
+define i32 @or_i32(i32 %a, i32 %b) {
+; LA32-LABEL: or_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i32 %a, %b
+  ret i32 %r
+}
+
+define i64 @or_i64(i64 %a, i64 %b) {
+; LA32-LABEL: or_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    or $a1, $a1, $a3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i64 %a, %b
+  ret i64 %r
+}
+
+define i1 @or_i1_0(i1 %b) {
+; LA32-LABEL: or_i1_0:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i1_0:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i1 4, %b
+  ret i1 %r
+}
+
+define i1 @or_i1_5(i1 %b) {
+; LA32-LABEL: or_i1_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a0, $zero, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i1_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a0, $zero, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i1 5, %b
+  ret i1 %r
+}
+
+define i8 @or_i8_5(i8 %b) {
+; LA32-LABEL: or_i8_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i8_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i8 5, %b
+  ret i8 %r
+}
+
+define i8 @or_i8_257(i8 %b) {
+; LA32-LABEL: or_i8_257:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i8_257:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i8 257, %b
+  ret i8 %r
+}
+
+define i16 @or_i16_5(i16 %b) {
+; LA32-LABEL: or_i16_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i16_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i16 5, %b
+  ret i16 %r
+}
+
+define i16 @or_i16_0x1000(i16 %b) {
+; LA32-LABEL: or_i16_0x1000:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    lu12i.w $a1, 1
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i16_0x1000:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    lu12i.w $a1, 1
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i16 4096, %b
+  ret i16 %r
+}
+
+define i16 @or_i16_0x10001(i16 %b) {
+; LA32-LABEL: or_i16_0x10001:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i16_0x10001:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i16 65537, %b
+  ret i16 %r
+}
+
+define i32 @or_i32_5(i32 %b) {
+; LA32-LABEL: or_i32_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i32_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i32 5, %b
+  ret i32 %r
+}
+
+define i32 @or_i32_0x1000(i32 %b) {
+; LA32-LABEL: or_i32_0x1000:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    lu12i.w $a1, 1
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i32_0x1000:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    lu12i.w $a1, 1
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i32 4096, %b
+  ret i32 %r
+}
+
+define i32 @or_i32_0x100000001(i32 %b) {
+; LA32-LABEL: or_i32_0x100000001:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i32_0x100000001:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i32 4294967297, %b
+  ret i32 %r
+}
+
+define i64 @or_i64_5(i64 %b) {
+; LA32-LABEL: or_i64_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    ori $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i64_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    ori $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i64 5, %b
+  ret i64 %r
+}
+
+define i64 @or_i64_0x1000(i64 %b) {
+; LA32-LABEL: or_i64_0x1000:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    lu12i.w $a2, 1
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: or_i64_0x1000:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    lu12i.w $a1, 1
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = or i64 4096, %b
+  ret i64 %r
+}
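
A note on the constant cases: ori takes a 12-bit zero-extended immediate, so
constants up to 4095 fold into a single instruction, while wider constants are
first built with lu12i.w. Roughly, and only as orientation for the checks
above:

  or i32 %b, 5     ->  ori     $a0, $a0, 5    ; 5 fits in uimm12
  or i32 %b, 4096  ->  lu12i.w $a1, 1         ; $a1 = 1 << 12
                       or      $a0, $a0, $a1

Constants are also truncated to the operand type first, which is why
or_i8_257 and or_i16_0x10001 both collapse to `ori ..., 1`.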

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-dbl.ll
new file mode 100644
index 0000000000000..4c6026aba5acf
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-dbl.ll
@@ -0,0 +1,23 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+;; Test selection of bare double-precision floating-point values:
+;; https://llvm.org/docs/LangRef.html#select-instruction
+
+define double @test(i1 %a, double %b, double %c) {
+; LA32-LABEL: test:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: test:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = select i1 %a, double %b, double %c
+  ret double %res
+}
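
The three-instruction sequence in the checks reads as follows (an informal
gloss, not additional test content):

  andi     $a0, $a0, 1              ; only bit 0 of the incoming i1 is defined
  movgr2cf $fcc0, $a0               ; move the condition into an FP condition flag
  fsel     $fa0, $fa1, $fa0, $fcc0  ; $fcc0 ? $fa0 (%b) : $fa1 (%c)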

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-flt.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-flt.ll
new file mode 100644
index 0000000000000..af4789b522586
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-flt.ll
@@ -0,0 +1,23 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
+
+;; Test selection of bare single-precision floating-point values:
+;; https://llvm.org/docs/LangRef.html#select-instruction
+
+define float @test(i1 %a, float %b, float %c) {
+; LA32-LABEL: test:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: test:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = select i1 %a, float %b, float %c
+  ret float %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-int.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-int.ll
new file mode 100644
index 0000000000000..3481e79b248bd
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-bare-int.ll
@@ -0,0 +1,107 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'select' LLVM IR instruction on bare integers: https://llvm.org/docs/LangRef.html#select-instruction
+
+define i1 @bare_select_i1(i1 %a, i1 %b, i1 %c) {
+; LA32-LABEL: bare_select_i1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a0, $a0, 1
+; LA32-NEXT:    masknez $a2, $a2, $a0
+; LA32-NEXT:    maskeqz $a0, $a1, $a0
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: bare_select_i1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    masknez $a2, $a2, $a0
+; LA64-NEXT:    maskeqz $a0, $a1, $a0
+; LA64-NEXT:    or $a0, $a0, $a2
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = select i1 %a, i1 %b, i1 %c
+  ret i1 %res
+}
+
+define i8 @bare_select_i8(i1 %a, i8 %b, i8 %c) {
+; LA32-LABEL: bare_select_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a0, $a0, 1
+; LA32-NEXT:    masknez $a2, $a2, $a0
+; LA32-NEXT:    maskeqz $a0, $a1, $a0
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: bare_select_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    masknez $a2, $a2, $a0
+; LA64-NEXT:    maskeqz $a0, $a1, $a0
+; LA64-NEXT:    or $a0, $a0, $a2
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = select i1 %a, i8 %b, i8 %c
+  ret i8 %res
+}
+
+define i16 @bare_select_i16(i1 %a, i16 %b, i16 %c) {
+; LA32-LABEL: bare_select_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a0, $a0, 1
+; LA32-NEXT:    masknez $a2, $a2, $a0
+; LA32-NEXT:    maskeqz $a0, $a1, $a0
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: bare_select_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    masknez $a2, $a2, $a0
+; LA64-NEXT:    maskeqz $a0, $a1, $a0
+; LA64-NEXT:    or $a0, $a0, $a2
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = select i1 %a, i16 %b, i16 %c
+  ret i16 %res
+}
+
+define i32 @bare_select_i32(i1 %a, i32 %b, i32 %c) {
+; LA32-LABEL: bare_select_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a0, $a0, 1
+; LA32-NEXT:    masknez $a2, $a2, $a0
+; LA32-NEXT:    maskeqz $a0, $a1, $a0
+; LA32-NEXT:    or $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: bare_select_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    masknez $a2, $a2, $a0
+; LA64-NEXT:    maskeqz $a0, $a1, $a0
+; LA64-NEXT:    or $a0, $a0, $a2
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = select i1 %a, i32 %b, i32 %c
+  ret i32 %res
+}
+
+define i64 @bare_select_i64(i1 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bare_select_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a5, $a0, 1
+; LA32-NEXT:    masknez $a0, $a3, $a5
+; LA32-NEXT:    maskeqz $a1, $a1, $a5
+; LA32-NEXT:    or $a0, $a1, $a0
+; LA32-NEXT:    masknez $a1, $a4, $a5
+; LA32-NEXT:    maskeqz $a2, $a2, $a5
+; LA32-NEXT:    or $a1, $a2, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: bare_select_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a0, $a0, 1
+; LA64-NEXT:    masknez $a2, $a2, $a0
+; LA64-NEXT:    maskeqz $a0, $a1, $a0
+; LA64-NEXT:    or $a0, $a0, $a2
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %res = select i1 %a, i64 %b, i64 %c
+  ret i64 %res
+}
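
The integer ISA has no conditional move, so the select is assembled from the
mask pair. Reading the instructions informally:

  maskeqz $rd, $rj, $rk   ; $rd = ($rk != 0) ? $rj : 0
  masknez $rd, $rj, $rk   ; $rd = ($rk == 0) ? $rj : 0

so `select %a, %b, %c` lowers to or(maskeqz(%b, %a), masknez(%c, %a)), and the
LA32 i64 case simply applies the pair once per 32-bit half.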

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-dbl.ll
new file mode 100644
index 0000000000000..4397b64d927be
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-dbl.ll
@@ -0,0 +1,272 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+;; Test selection of double-precision floating-point values after comparison
+
+define double @fcmp_false(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_false:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fmov.d $fa0, $fa3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_false:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fmov.d $fa0, $fa3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp false double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_oeq(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oeq double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_ogt(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ogt double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_oge(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oge double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_olt(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp olt double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_ole(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ole double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_one(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp one double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_ord(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ord double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_ueq(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ueq double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_ugt(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ugt double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_uge(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uge double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_ult(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ult double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_ule(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ule double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_une(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp une double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_uno(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uno double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
+
+define double @fcmp_true(double %a, double %b, double %x, double %y) {
+; LA32-LABEL: fcmp_true:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fmov.d $fa0, $fa2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_true:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fmov.d $fa0, $fa2
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp true double %a, %b
+  %res = select i1 %cmp, double %x, double %y
+  ret double %res
+}
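
When both the compare and the select operands are floating point, the flag
never has to leave $fcc0: fcmp.c<cond>.d writes the condition flag and fsel
consumes it directly. For fcmp_olt above, informally:

  fcmp.clt.d $fcc0, $fa0, $fa1        ; $fcc0 = (%a < %b)
  fsel       $fa0, $fa3, $fa2, $fcc0  ; $fcc0 ? $fa2 (%x) : $fa3 (%y)

This is the difference from select-fpcc-int.ll below, where the flag
round-trips through a GPR via movcf2gr before the maskeqz/masknez pair.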

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-flt.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-flt.ll
new file mode 100644
index 0000000000000..23d71493cb4be
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-flt.ll
@@ -0,0 +1,272 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
+
+;; Test selection of single-precision floating-point values after comparison
+
+define float @fcmp_false(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_false:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fmov.s $fa0, $fa3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_false:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fmov.s $fa0, $fa3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp false float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_oeq(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oeq float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_ogt(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ogt float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_oge(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oge float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_olt(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp olt float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_ole(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ole float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_one(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp one float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_ord(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ord float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_ueq(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ueq float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_ugt(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ugt float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_uge(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uge float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_ult(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ult float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_ule(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ule float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_une(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp une float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_uno(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    fsel $fa0, $fa3, $fa2, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uno float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}
+
+define float @fcmp_true(float %a, float %b, float %x, float %y) {
+; LA32-LABEL: fcmp_true:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fmov.s $fa0, $fa2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fcmp_true:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fmov.s $fa0, $fa2
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp true float %a, %b
+  %res = select i1 %cmp, float %x, float %y
+  ret float %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-int.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-int.ll
new file mode 100644
index 0000000000000..9e742ee576cbb
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-fpcc-int.ll
@@ -0,0 +1,704 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+;; Test integer selection after `fcmp`
+
+define i32 @f32_fcmp_false(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_false:
+; LA32:       # %bb.0:
+; LA32-NEXT:    move $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_false:
+; LA64:       # %bb.0:
+; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp false float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_oeq(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oeq float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_ogt(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ogt float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_oge(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oge float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_olt(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp olt float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_ole(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ole float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_one(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp one float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_ord(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ord float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_ueq(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ueq float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_ugt(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ugt float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_uge(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uge float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_ult(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ult float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_ule(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ule float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_une(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp une float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_uno(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.s $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uno float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f32_fcmp_true(float %a, float %b, i32 %x, i32 %y) {
+; LA32-LABEL: f32_fcmp_true:
+; LA32:       # %bb.0:
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_fcmp_true:
+; LA64:       # %bb.0:
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp true float %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_false(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_false:
+; LA32:       # %bb.0:
+; LA32-NEXT:    move $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_false:
+; LA64:       # %bb.0:
+; LA64-NEXT:    move $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp false double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_oeq(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_oeq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_oeq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.ceq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oeq double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_ogt(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_ogt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_ogt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ogt double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_oge(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_oge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_oge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp oge double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_olt(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_olt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_olt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp olt double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_ole(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_ole:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_ole:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cle.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ole double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_one(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_one:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_one:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cne.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp one double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_ord(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_ord:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_ord:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cor.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ord double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_ueq(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_ueq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_ueq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cueq.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ueq double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_ugt(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ugt double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_uge(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa1, $fa0
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uge double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_ult(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cult.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ult double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_ule(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cule.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp ule double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_une(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_une:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_une:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cune.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp une double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_uno(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_uno:
+; LA32:       # %bb.0:
+; LA32-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a2, $fcc0
+; LA32-NEXT:    masknez $a1, $a1, $a2
+; LA32-NEXT:    maskeqz $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_uno:
+; LA64:       # %bb.0:
+; LA64-NEXT:    fcmp.cun.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a2, $fcc0
+; LA64-NEXT:    masknez $a1, $a1, $a2
+; LA64-NEXT:    maskeqz $a0, $a0, $a2
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp uno double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @f64_fcmp_true(double %a, double %b, i32 %x, i32 %y) {
+; LA32-LABEL: f64_fcmp_true:
+; LA32:       # %bb.0:
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_fcmp_true:
+; LA64:       # %bb.0:
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cmp = fcmp true double %a, %b
+  %res = select i1 %cmp, i32 %x, i32 %y
+  ret i32 %res
+}
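+
+;; A note on the pattern checked above (a reading of the generated code, not
+;; a normative statement): each fcmp lowers to a single fcmp.<cond>.d that
+;; writes $fcc0, movcf2gr copies the flag into a GPR, and the i32 select is
+;; then done with the masknez/maskeqz/or idiom. The degenerate 'fcmp true'
+;; folds away entirely, leaving a bare return.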

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-dbl.ll
new file mode 100644
index 0000000000000..5ccee6b193b0d
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-dbl.ll
@@ -0,0 +1,206 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+;; Test selection of double-precision floating-point values after integer comparison
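+;;
+;; The expected lowering, as the checks below show: the integer comparison
+;; result is materialized in a GPR, movgr2cf transfers that bit into the
+;; $fcc0 condition-flag register, and fsel selects between the two double
+;; operands under $fcc0.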
+
+define double @select_eq(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_eq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    sltui $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_eq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    sltui $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp eq i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}
+
+define double @select_ne(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_ne:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    sltu $a0, $zero, $a0
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ne:
+; LA64:       # %bb.0:
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    sltu $a0, $zero, $a0
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ne i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}
+
+define double @select_ugt(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a1, $a0
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a1, $a0
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ugt i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}
+
+define double @select_uge(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a0, $a1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a0, $a1
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp uge i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}
+
+define double @select_ult(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a0, $a1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a0, $a1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ult i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}
+
+define double @select_ule(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a1, $a0
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a1, $a0
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ule i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}
+
+define double @select_sgt(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_sgt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_sgt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp sgt i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}
+
+define double @select_sge(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_sge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a0, $a1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_sge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a0, $a1
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp sge i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}
+
+define double @select_slt(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_slt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a0, $a1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_slt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a0, $a1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp slt i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}
+
+define double @select_sle(i32 signext %a, i32 signext %b, double %x, double %y) {
+; LA32-LABEL: select_sle:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_sle:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp sle i32 %a, %b
+  %res = select i1 %cond, double %x, double %y
+  ret double %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-flt.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-flt.ll
new file mode 100644
index 0000000000000..98b999776e3f6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-flt.ll
@@ -0,0 +1,206 @@
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
+
+;; Test selection of single-precision floating-point values after integer comparison
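+;;
+;; The lowering mirrors the double-precision case (movgr2cf + fsel); only the
+;; RUN lines differ, enabling +f and disabling +d.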
+
+define float @select_eq(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_eq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    sltui $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_eq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    sltui $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp eq i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}
+
+define float @select_ne(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_ne:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    sltu $a0, $zero, $a0
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ne:
+; LA64:       # %bb.0:
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    sltu $a0, $zero, $a0
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ne i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}
+
+define float @select_ugt(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a1, $a0
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a1, $a0
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ugt i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}
+
+define float @select_uge(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a0, $a1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a0, $a1
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp uge i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}
+
+define float @select_ult(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a0, $a1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a0, $a1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ult i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}
+
+define float @select_ule(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a1, $a0
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a1, $a0
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ule i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}
+
+define float @select_sgt(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_sgt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_sgt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp sgt i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}
+
+define float @select_sge(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_sge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a0, $a1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_sge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a0, $a1
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp sge i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}
+
+define float @select_slt(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_slt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a0, $a1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_slt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a0, $a1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp slt i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}
+
+define float @select_sle(i32 signext %a, i32 signext %b, float %x, float %y) {
+; LA32-LABEL: select_sle:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    movgr2cf $fcc0, $a0
+; LA32-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_sle:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    movgr2cf $fcc0, $a0
+; LA64-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp sle i32 %a, %b
+  %res = select i1 %cond, float %x, float %y
+  ret float %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-int.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-int.ll
new file mode 100644
index 0000000000000..3b7c2adfb868b
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/select-icc-int.ll
@@ -0,0 +1,226 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Test selection of integer values after integer comparison
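+;;
+;; A sketch of the GPR select idiom exercised below (semantics per the
+;; LoongArch base ISA):
+;;   maskeqz t0, x, cond   ; t0 = (cond != 0) ? x : 0
+;;   masknez t1, y, cond   ; t1 = (cond == 0) ? y : 0
+;;   or      res, t0, t1   ; res = cond ? x : y
+;; t0/t1/res are illustrative names, not real registers.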
+
+define i32 @select_eq(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_eq:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    sltui $a0, $a0, 1
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_eq:
+; LA64:       # %bb.0:
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    sltui $a0, $a0, 1
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp eq i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @select_ne(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_ne:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    sltu $a0, $zero, $a0
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ne:
+; LA64:       # %bb.0:
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    sltu $a0, $zero, $a0
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ne i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @select_ugt(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_ugt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a1, $a0
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ugt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a1, $a0
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ugt i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @select_uge(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_uge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a0, $a1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_uge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a0, $a1
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp uge i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @select_ult(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_ult:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a0, $a1
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ult:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a0, $a1
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ult i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @select_ule(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_ule:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sltu $a0, $a1, $a0
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_ule:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sltu $a0, $a1, $a0
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp ule i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @select_sgt(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_sgt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_sgt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp sgt i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @select_sge(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_sge:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a0, $a1
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_sge:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a0, $a1
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp sge i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @select_slt(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_slt:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a0, $a1
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_slt:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a0, $a1
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp slt i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}
+
+define i32 @select_sle(i32 signext %a, i32 signext %b, i32 %x, i32 %y) {
+; LA32-LABEL: select_sle:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slt $a0, $a1, $a0
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    masknez $a1, $a3, $a0
+; LA32-NEXT:    maskeqz $a0, $a2, $a0
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: select_sle:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slt $a0, $a1, $a0
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    masknez $a1, $a3, $a0
+; LA64-NEXT:    maskeqz $a0, $a2, $a0
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %cond = icmp sle i32 %a, %b
+  %res = select i1 %cond, i32 %x, i32 %y
+  ret i32 %res
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/shl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/shl.ll
new file mode 100644
index 0000000000000..de25040452b12
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/shl.ll
@@ -0,0 +1,156 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'shl' LLVM IR instruction: https://llvm.org/docs/LangRef.html#shl-instruction
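+;;
+;; On loongarch64 every width up to i64 maps onto a single sll.w/sll.d (or
+;; slli.w/slli.d for constant amounts). On loongarch32 an i64 shift has no
+;; single instruction, so it is expanded into word-sized shifts plus a
+;; masknez/maskeqz select on whether the shift amount is >= 32, as the
+;; shl_i64 checks below show.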
+
+define i1 @shl_i1(i1 %x, i1 %y) {
+; LA32-LABEL: shl_i1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i1 %x, %y
+  ret i1 %shl
+}
+
+define i8 @shl_i8(i8 %x, i8 %y) {
+; LA32-LABEL: shl_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sll.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sll.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i8 %x, %y
+  ret i8 %shl
+}
+
+define i16 @shl_i16(i16 %x, i16 %y) {
+; LA32-LABEL: shl_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sll.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sll.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i16 %x, %y
+  ret i16 %shl
+}
+
+define i32 @shl_i32(i32 %x, i32 %y) {
+; LA32-LABEL: shl_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sll.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sll.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i32 %x, %y
+  ret i32 %shl
+}
+
+define i64 @shl_i64(i64 %x, i64 %y) {
+; LA32-LABEL: shl_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    xori $a3, $a2, 31
+; LA32-NEXT:    srli.w $a4, $a0, 1
+; LA32-NEXT:    srl.w $a3, $a4, $a3
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    or $a1, $a1, $a3
+; LA32-NEXT:    addi.w $a3, $a2, -32
+; LA32-NEXT:    slti $a4, $a3, 0
+; LA32-NEXT:    maskeqz $a1, $a1, $a4
+; LA32-NEXT:    sll.w $a5, $a0, $a3
+; LA32-NEXT:    masknez $a4, $a5, $a4
+; LA32-NEXT:    or $a1, $a1, $a4
+; LA32-NEXT:    sll.w $a0, $a0, $a2
+; LA32-NEXT:    srai.w $a2, $a3, 31
+; LA32-NEXT:    and $a0, $a2, $a0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sll.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i64 %x, %y
+  ret i64 %shl
+}
+
+define i1 @shl_i1_3(i1 %x) {
+; LA32-LABEL: shl_i1_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i1_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i1 %x, 3
+  ret i1 %shl
+}
+
+define i8 @shl_i8_3(i8 %x) {
+; LA32-LABEL: shl_i8_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i8_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i8 %x, 3
+  ret i8 %shl
+}
+
+define i16 @shl_i16_3(i16 %x) {
+; LA32-LABEL: shl_i16_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i16_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i16 %x, 3
+  ret i16 %shl
+}
+
+define i32 @shl_i32_3(i32 %x) {
+; LA32-LABEL: shl_i32_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i32_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i32 %x, 3
+  ret i32 %shl
+}
+
+define i64 @shl_i64_3(i64 %x) {
+; LA32-LABEL: shl_i64_3:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a1, 3
+; LA32-NEXT:    srli.w $a2, $a0, 29
+; LA32-NEXT:    or $a1, $a1, $a2
+; LA32-NEXT:    slli.w $a0, $a0, 3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: shl_i64_3:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a0, $a0, 3
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %shl = shl i64 %x, 3
+  ret i64 %shl
+}

diff  --git a/llvm/test/CodeGen/LoongArch/ir-instruction/xor.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/xor.ll
new file mode 100644
index 0000000000000..2f85e645c04f7
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/xor.ll
@@ -0,0 +1,264 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; Exercise the 'xor' LLVM IR instruction: https://llvm.org/docs/LangRef.html#xor-instruction
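+;;
+;; xori accepts a 12-bit unsigned immediate, so constants in [0, 4095] fold
+;; into a single xori, while 0x1000 must first be materialized with lu12i.w.
+;; Constants wider than the operand type (e.g. i8 257, i16 65537) are
+;; truncated to the type, which is why they also fold to xori with the
+;; truncated value.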
+
+define i1 @xor_i1(i1 %a, i1 %b) {
+; LA32-LABEL: xor_i1:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i1:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i1 %a, %b
+  ret i1 %r
+}
+
+define i8 @xor_i8(i8 %a, i8 %b) {
+; LA32-LABEL: xor_i8:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i8:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i8 %a, %b
+  ret i8 %r
+}
+
+define i16 @xor_i16(i16 %a, i16 %b) {
+; LA32-LABEL: xor_i16:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i16:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i16 %a, %b
+  ret i16 %r
+}
+
+define i32 @xor_i32(i32 %a, i32 %b) {
+; LA32-LABEL: xor_i32:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i32:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i32 %a, %b
+  ret i32 %r
+}
+
+define i64 @xor_i64(i64 %a, i64 %b) {
+; LA32-LABEL: xor_i64:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xor $a0, $a0, $a2
+; LA32-NEXT:    xor $a1, $a1, $a3
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i64:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i64 %a, %b
+  ret i64 %r
+}
+
+define i1 @xor_i1_0(i1 %b) {
+; LA32-LABEL: xor_i1_0:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i1_0:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i1 4, %b
+  ret i1 %r
+}
+
+define i1 @xor_i1_5(i1 %b) {
+; LA32-LABEL: xor_i1_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i1_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i1 5, %b
+  ret i1 %r
+}
+
+define i8 @xor_i8_5(i8 %b) {
+; LA32-LABEL: xor_i8_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xori $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i8_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xori $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i8 5, %b
+  ret i8 %r
+}
+
+define i8 @xor_i8_257(i8 %b) {
+; LA32-LABEL: xor_i8_257:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i8_257:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i8 257, %b
+  ret i8 %r
+}
+
+define i16 @xor_i16_5(i16 %b) {
+; LA32-LABEL: xor_i16_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xori $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i16_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xori $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i16 5, %b
+  ret i16 %r
+}
+
+define i16 @xor_i16_0x1000(i16 %b) {
+; LA32-LABEL: xor_i16_0x1000:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    lu12i.w $a1, 1
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i16_0x1000:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    lu12i.w $a1, 1
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i16 4096, %b
+  ret i16 %r
+}
+
+define i16 @xor_i16_0x10001(i16 %b) {
+; LA32-LABEL: xor_i16_0x10001:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i16_0x10001:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i16 65537, %b
+  ret i16 %r
+}
+
+define i32 @xor_i32_5(i32 %b) {
+; LA32-LABEL: xor_i32_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xori $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i32_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xori $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i32 5, %b
+  ret i32 %r
+}
+
+define i32 @xor_i32_0x1000(i32 %b) {
+; LA32-LABEL: xor_i32_0x1000:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    lu12i.w $a1, 1
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i32_0x1000:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    lu12i.w $a1, 1
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i32 4096, %b
+  ret i32 %r
+}
+
+define i32 @xor_i32_0x100000001(i32 %b) {
+; LA32-LABEL: xor_i32_0x100000001:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xori $a0, $a0, 1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i32_0x100000001:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xori $a0, $a0, 1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i32 4294967297, %b
+  ret i32 %r
+}
+
+define i64 @xor_i64_5(i64 %b) {
+; LA32-LABEL: xor_i64_5:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    xori $a0, $a0, 5
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i64_5:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    xori $a0, $a0, 5
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i64 5, %b
+  ret i64 %r
+}
+
+define i64 @xor_i64_0x1000(i64 %b) {
+; LA32-LABEL: xor_i64_0x1000:
+; LA32:       # %bb.0: # %entry
+; LA32-NEXT:    lu12i.w $a2, 1
+; LA32-NEXT:    xor $a0, $a0, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: xor_i64_0x1000:
+; LA64:       # %bb.0: # %entry
+; LA64-NEXT:    lu12i.w $a1, 1
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %r = xor i64 4096, %b
+  ret i64 %r
+}

diff  --git a/llvm/test/CodeGen/LoongArch/shift-masked-shamt.ll b/llvm/test/CodeGen/LoongArch/shift-masked-shamt.ll
new file mode 100644
index 0000000000000..1878e0ed24240
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/shift-masked-shamt.ll
@@ -0,0 +1,255 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+;; This test checks that unnecessary masking of shift amount operands is
+;; eliminated during instruction selection. It also ensures that the masking
+;; is not removed when it may affect the shift amount.
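+;;
+;; Rationale: sll.w/srl.w/sra.w read only the low 5 bits of the shift-amount
+;; register (the low 6 bits for the .d forms), so an 'and' whose mask covers
+;; those bits (31, 4095, 65535, ...) is a no-op and can be dropped, while a
+;; narrower or disjoint mask (15, 7, 32) must be kept.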
+
+define i32 @sll_redundant_mask(i32 %a, i32 %b) {
+; LA32-LABEL: sll_redundant_mask:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sll.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sll_redundant_mask:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sll.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = and i32 %b, 31
+  %2 = shl i32 %a, %1
+  ret i32 %2
+}
+
+define i32 @sll_non_redundant_mask(i32 %a, i32 %b) {
+; LA32-LABEL: sll_non_redundant_mask:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a1, $a1, 15
+; LA32-NEXT:    sll.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sll_non_redundant_mask:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 15
+; LA64-NEXT:    sll.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = and i32 %b, 15
+  %2 = shl i32 %a, %1
+  ret i32 %2
+}
+
+define i32 @srl_redundant_mask(i32 %a, i32 %b) {
+; LA32-LABEL: srl_redundant_mask:
+; LA32:       # %bb.0:
+; LA32-NEXT:    srl.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: srl_redundant_mask:
+; LA64:       # %bb.0:
+; LA64-NEXT:    srl.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = and i32 %b, 4095
+  %2 = lshr i32 %a, %1
+  ret i32 %2
+}
+
+define i32 @srl_non_redundant_mask(i32 %a, i32 %b) {
+; LA32-LABEL: srl_non_redundant_mask:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a1, $a1, 7
+; LA32-NEXT:    srl.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: srl_non_redundant_mask:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 7
+; LA64-NEXT:    srl.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = and i32 %b, 7
+  %2 = lshr i32 %a, %1
+  ret i32 %2
+}
+
+define i32 @sra_redundant_mask(i32 %a, i32 %b) {
+; LA32-LABEL: sra_redundant_mask:
+; LA32:       # %bb.0:
+; LA32-NEXT:    sra.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sra_redundant_mask:
+; LA64:       # %bb.0:
+; LA64-NEXT:    sra.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = and i32 %b, 65535
+  %2 = ashr i32 %a, %1
+  ret i32 %2
+}
+
+define i32 @sra_non_redundant_mask(i32 %a, i32 %b) {
+; LA32-LABEL: sra_non_redundant_mask:
+; LA32:       # %bb.0:
+; LA32-NEXT:    andi $a1, $a1, 32
+; LA32-NEXT:    sra.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sra_non_redundant_mask:
+; LA64:       # %bb.0:
+; LA64-NEXT:    andi $a1, $a1, 32
+; LA64-NEXT:    sra.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = and i32 %b, 32
+  %2 = ashr i32 %a, %1
+  ret i32 %2
+}
+
+define i32 @sll_redundant_mask_zeros(i32 %a, i32 %b) {
+; LA32-LABEL: sll_redundant_mask_zeros:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a1, 1
+; LA32-NEXT:    sll.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sll_redundant_mask_zeros:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a1, 1
+; LA64-NEXT:    sll.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = shl i32 %b, 1
+  %2 = and i32 %1, 30
+  %3 = shl i32 %a, %2
+  ret i32 %3
+}
+
+define i32 @srl_redundant_mask_zeros(i32 %a, i32 %b) {
+; LA32-LABEL: srl_redundant_mask_zeros:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a1, 2
+; LA32-NEXT:    srl.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: srl_redundant_mask_zeros:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a1, 2
+; LA64-NEXT:    srl.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = shl i32 %b, 2
+  %2 = and i32 %1, 28
+  %3 = lshr i32 %a, %2
+  ret i32 %3
+}
+
+define i32 @sra_redundant_mask_zeros(i32 %a, i32 %b) {
+; LA32-LABEL: sra_redundant_mask_zeros:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a1, $a1, 3
+; LA32-NEXT:    sra.w $a0, $a0, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sra_redundant_mask_zeros:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a1, 3
+; LA64-NEXT:    sra.w $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = shl i32 %b, 3
+  %2 = and i32 %1, 24
+  %3 = ashr i32 %a, %2
+  ret i32 %3
+}
+
+define i64 @sll_redundant_mask_zeros_i64(i64 %a, i64 %b) {
+; LA32-LABEL: sll_redundant_mask_zeros_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a2, 2
+; LA32-NEXT:    srli.w $a3, $a0, 1
+; LA32-NEXT:    andi $a4, $a2, 60
+; LA32-NEXT:    xori $a5, $a4, 31
+; LA32-NEXT:    srl.w $a3, $a3, $a5
+; LA32-NEXT:    sll.w $a1, $a1, $a2
+; LA32-NEXT:    or $a1, $a1, $a3
+; LA32-NEXT:    addi.w $a3, $a4, -32
+; LA32-NEXT:    slti $a4, $a3, 0
+; LA32-NEXT:    maskeqz $a1, $a1, $a4
+; LA32-NEXT:    sll.w $a5, $a0, $a3
+; LA32-NEXT:    masknez $a4, $a5, $a4
+; LA32-NEXT:    or $a1, $a1, $a4
+; LA32-NEXT:    sll.w $a0, $a0, $a2
+; LA32-NEXT:    srai.w $a2, $a3, 31
+; LA32-NEXT:    and $a0, $a2, $a0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sll_redundant_mask_zeros_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a1, 2
+; LA64-NEXT:    sll.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = shl i64 %b, 2
+  %2 = and i64 %1, 60
+  %3 = shl i64 %a, %2
+  ret i64 %3
+}
+
+define i64 @srl_redundant_mask_zeros_i64(i64 %a, i64 %b) {
+; LA32-LABEL: srl_redundant_mask_zeros_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a2, $a2, 3
+; LA32-NEXT:    slli.w $a3, $a1, 1
+; LA32-NEXT:    andi $a4, $a2, 56
+; LA32-NEXT:    xori $a5, $a4, 31
+; LA32-NEXT:    sll.w $a3, $a3, $a5
+; LA32-NEXT:    srl.w $a0, $a0, $a2
+; LA32-NEXT:    or $a0, $a0, $a3
+; LA32-NEXT:    addi.w $a3, $a4, -32
+; LA32-NEXT:    slti $a4, $a3, 0
+; LA32-NEXT:    maskeqz $a0, $a0, $a4
+; LA32-NEXT:    srl.w $a5, $a1, $a3
+; LA32-NEXT:    masknez $a4, $a5, $a4
+; LA32-NEXT:    or $a0, $a0, $a4
+; LA32-NEXT:    srl.w $a1, $a1, $a2
+; LA32-NEXT:    srai.w $a2, $a3, 31
+; LA32-NEXT:    and $a1, $a2, $a1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: srl_redundant_mask_zeros_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a1, 3
+; LA64-NEXT:    srl.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = shl i64 %b, 3
+  %2 = and i64 %1, 56
+  %3 = lshr i64 %a, %2
+  ret i64 %3
+}
+
+define i64 @sra_redundant_mask_zeros_i64(i64 %a, i64 %b) {
+; LA32-LABEL: sra_redundant_mask_zeros_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    slli.w $a3, $a2, 4
+; LA32-NEXT:    srai.w $a2, $a1, 31
+; LA32-NEXT:    andi $a4, $a3, 48
+; LA32-NEXT:    addi.w $a5, $a4, -32
+; LA32-NEXT:    slti $a6, $a5, 0
+; LA32-NEXT:    masknez $a2, $a2, $a6
+; LA32-NEXT:    sra.w $a7, $a1, $a3
+; LA32-NEXT:    maskeqz $a7, $a7, $a6
+; LA32-NEXT:    or $a2, $a7, $a2
+; LA32-NEXT:    srl.w $a0, $a0, $a3
+; LA32-NEXT:    xori $a3, $a4, 31
+; LA32-NEXT:    slli.w $a4, $a1, 1
+; LA32-NEXT:    sll.w $a3, $a4, $a3
+; LA32-NEXT:    or $a0, $a0, $a3
+; LA32-NEXT:    sra.w $a1, $a1, $a5
+; LA32-NEXT:    maskeqz $a0, $a0, $a6
+; LA32-NEXT:    masknez $a1, $a1, $a6
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    move $a1, $a2
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: sra_redundant_mask_zeros_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a1, 4
+; LA64-NEXT:    sra.d $a0, $a0, $a1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = shl i64 %b, 4
+  %2 = and i64 %1, 48
+  %3 = ashr i64 %a, %2
+  ret i64 %3
+}

