[llvm] [LoongArch] Optimize conditional branches (PR #147885)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 1 20:58:01 PDT 2025
https://github.com/heiher updated https://github.com/llvm/llvm-project/pull/147885
>From 2d572b156673b3f69326b3dbb8881284ca1f4b18 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Thu, 10 Jul 2025 12:08:16 +0800
Subject: [PATCH 1/4] [LoongArch] Optimize conditional branches
This patch attempts to optimize conditional branches by combining logical
operations within their conditions. This enables the selection of more
efficient branch instructions. For example, for integer comparisons, `blez x`
can be used instead of materializing the constant 1 (`ori t, $zero, 1`) and
branching with `blt x, t`; for floating-point comparisons, the dedicated
floating-point branch instructions can be used, avoiding a move of the
comparison result into an integer register.
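
For illustration, the test updates in this patch show both cases. An integer
comparison against the constant 1 (merge-base-offset.ll):

  Before:
    ori  $a2, $zero, 1
    blt  $a1, $a2, .LBB25_2
  After:
    blez $a1, .LBB25_2

A floating-point equality branch (fcmp-dbl.ll):

  Before:
    fcmp.ceq.d $fcc0, $fa0, $fa1
    movcf2gr   $a1, $fcc0
    xori       $a1, $a1, 1
    bnez       $a1, .LBB17_2
  After:
    fcmp.ceq.d $fcc0, $fa0, $fa1
    bceqz      $fcc0, .LBB17_2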
---
.../LoongArch/LoongArchFloat32InstrInfo.td | 12 +-
.../LoongArch/LoongArchFloat64InstrInfo.td | 2 +
.../LoongArch/LoongArchISelLowering.cpp | 175 ++++++++++++++++++
.../Target/LoongArch/LoongArchISelLowering.h | 5 +
.../Target/LoongArch/LoongArchInstrInfo.td | 62 +++----
.../LoongArch/ir-instruction/fcmp-dbl.ll | 14 +-
.../LoongArch/ir-instruction/fcmp-flt.ll | 14 +-
.../LoongArch/merge-base-offset-tlsle.ll | 6 +-
.../CodeGen/LoongArch/merge-base-offset.ll | 9 +-
.../CodeGen/LoongArch/preferred-alignments.ll | 11 +-
10 files changed, 232 insertions(+), 78 deletions(-)
diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
index d5a5f17348e4b..99205fbe023c8 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -10,6 +10,9 @@
//
//===----------------------------------------------------------------------===//
+def NotBoolXor : PatFrags<(ops node:$val),
+ [(xor node:$val, -1), (xor node:$val, 1)]>;
+
//===----------------------------------------------------------------------===//
// LoongArch specific DAG Nodes.
//===----------------------------------------------------------------------===//
@@ -22,6 +25,7 @@ def SDT_LoongArchFTINT : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>;
def SDT_LoongArchFRECIPE : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>;
def SDT_LoongArchFRSQRTE : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>;
+def loongarch_brcond : SDNode<"LoongArchISD::BRCOND", SDTBrcond, [SDNPHasChain]>;
def loongarch_movgr2fr_w_la64
: SDNode<"LoongArchISD::MOVGR2FR_W_LA64", SDT_LoongArchMOVGR2FR_W_LA64>;
def loongarch_movfr2gr_s_la64
@@ -208,16 +212,18 @@ def : PatFPSetcc<SETUO, FCMP_CUN_S, FPR32>;
def : PatFPSetcc<SETLT, FCMP_CLT_S, FPR32>;
multiclass PatFPBrcond<CondCode cc, LAInst CmpInst, RegisterClass RegTy> {
- def : Pat<(brcond (xor (GRLenVT (setcc RegTy:$fj, RegTy:$fk, cc)), -1),
- bb:$imm21),
+ def : Pat<(loongarch_brcond (NotBoolXor (GRLenVT (setcc RegTy:$fj, RegTy:$fk, cc))),
+ bb:$imm21),
(BCEQZ (CmpInst RegTy:$fj, RegTy:$fk), bb:$imm21)>;
- def : Pat<(brcond (GRLenVT (setcc RegTy:$fj, RegTy:$fk, cc)), bb:$imm21),
+ def : Pat<(loongarch_brcond (GRLenVT (setcc RegTy:$fj, RegTy:$fk, cc)), bb:$imm21),
(BCNEZ (CmpInst RegTy:$fj, RegTy:$fk), bb:$imm21)>;
}
defm : PatFPBrcond<SETOEQ, FCMP_CEQ_S, FPR32>;
+defm : PatFPBrcond<SETEQ , FCMP_CEQ_S, FPR32>;
defm : PatFPBrcond<SETOLT, FCMP_CLT_S, FPR32>;
defm : PatFPBrcond<SETOLE, FCMP_CLE_S, FPR32>;
+defm : PatFPBrcond<SETLE, FCMP_CLE_S, FPR32>;
defm : PatFPBrcond<SETONE, FCMP_CNE_S, FPR32>;
defm : PatFPBrcond<SETO, FCMP_COR_S, FPR32>;
defm : PatFPBrcond<SETUEQ, FCMP_CUEQ_S, FPR32>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
index 616640152c8d3..965ad8a0a35c6 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
@@ -184,8 +184,10 @@ def : PatFPSetcc<SETUO, FCMP_CUN_D, FPR64>;
def : PatFPSetcc<SETLT, FCMP_CLT_D, FPR64>;
defm : PatFPBrcond<SETOEQ, FCMP_CEQ_D, FPR64>;
+defm : PatFPBrcond<SETEQ, FCMP_CEQ_D, FPR64>;
defm : PatFPBrcond<SETOLT, FCMP_CLT_D, FPR64>;
defm : PatFPBrcond<SETOLE, FCMP_CLE_D, FPR64>;
+defm : PatFPBrcond<SETLE, FCMP_CLE_D, FPR64>;
defm : PatFPBrcond<SETONE, FCMP_CNE_D, FPR64>;
defm : PatFPBrcond<SETO, FCMP_COR_D, FPR64>;
defm : PatFPBrcond<SETUEQ, FCMP_CUEQ_D, FPR64>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 72dbb44815657..23ec33213c2cf 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -126,6 +126,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, GRLenVT, Expand);
+ setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::SELECT_CC, GRLenVT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, GRLenVT, Expand);
@@ -509,6 +510,8 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
return lowerPREFETCH(Op, DAG);
case ISD::SELECT:
return lowerSELECT(Op, DAG);
+ case ISD::BRCOND:
+ return lowerBRCOND(Op, DAG);
case ISD::FP_TO_FP16:
return lowerFP_TO_FP16(Op, DAG);
case ISD::FP16_TO_FP:
@@ -854,6 +857,35 @@ SDValue LoongArchTargetLowering::lowerSELECT(SDValue Op,
return DAG.getNode(LoongArchISD::SELECT_CC, DL, VT, Ops);
}
+SDValue LoongArchTargetLowering::lowerBRCOND(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue CondV = Op.getOperand(1);
+ SDLoc DL(Op);
+ MVT GRLenVT = Subtarget.getGRLenVT();
+
+ if (CondV.getOpcode() == ISD::SETCC) {
+ if (CondV.getOperand(0).getValueType() == GRLenVT) {
+ SDValue LHS = CondV.getOperand(0);
+ SDValue RHS = CondV.getOperand(1);
+ ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
+
+ translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
+
+ SDValue TargetCC = DAG.getCondCode(CCVal);
+ return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
+ Op.getOperand(0), LHS, RHS, TargetCC,
+ Op.getOperand(2));
+ } else if (CondV.getOperand(0).getValueType().isFloatingPoint()) {
+ return DAG.getNode(LoongArchISD::BRCOND, DL, Op.getValueType(),
+ Op.getOperand(0), CondV, Op.getOperand(2));
+ }
+ }
+
+ return DAG.getNode(LoongArchISD::BR_CC, DL, Op.getValueType(),
+ Op.getOperand(0), CondV, DAG.getConstant(0, DL, GRLenVT),
+ DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
+}
+
SDValue
LoongArchTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
@@ -5020,6 +5052,145 @@ static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG,
Src.getOperand(0));
}
+// Perform common combines for BR_CC and SELECT_CC conditions.
+static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
+ SelectionDAG &DAG, const LoongArchSubtarget &Subtarget) {
+ ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
+
+  // Since an arithmetic right shift always preserves the sign bit,
+  // the shift can be omitted.
+ // Fold setlt (sra X, N), 0 -> setlt X, 0 and
+ // setge (sra X, N), 0 -> setge X, 0
+ if (isNullConstant(RHS) && (CCVal == ISD::SETGE || CCVal == ISD::SETLT) &&
+ LHS.getOpcode() == ISD::SRA) {
+ LHS = LHS.getOperand(0);
+ return true;
+ }
+
+ if (!ISD::isIntEqualitySetCC(CCVal))
+ return false;
+
+ // Fold ((setlt X, Y), 0, ne) -> (X, Y, lt)
+ // Sometimes the setcc is introduced after br_cc/select_cc has been formed.
+ if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
+ LHS.getOperand(0).getValueType() == Subtarget.getGRLenVT()) {
+ // If we're looking for eq 0 instead of ne 0, we need to invert the
+ // condition.
+ bool Invert = CCVal == ISD::SETEQ;
+ CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
+ if (Invert)
+ CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
+
+ RHS = LHS.getOperand(1);
+ LHS = LHS.getOperand(0);
+ translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
+
+ CC = DAG.getCondCode(CCVal);
+ return true;
+ }
+
+ // If XOR is reused and has an immediate that will fit in XORI,
+ // do not fold.
+ auto isXorImmediate = [](const SDValue &Op) -> bool {
+ if (const auto *XorCnst = dyn_cast<ConstantSDNode>(Op))
+ return isInt<12>(XorCnst->getSExtValue());
+ return false;
+ };
+ // Fold (X(i1) ^ 1) == 0 -> X != 0
+ auto singleBitOp = [&DAG](const SDValue &VarOp,
+ const SDValue &ConstOp) -> bool {
+ if (const auto *XorCnst = dyn_cast<ConstantSDNode>(ConstOp)) {
+ const APInt Mask = APInt::getBitsSetFrom(VarOp.getValueSizeInBits(), 1);
+ return (XorCnst->getSExtValue() == 1) &&
+ DAG.MaskedValueIsZero(VarOp, Mask);
+ }
+ return false;
+ };
+ auto onlyUsedBySelectOrBR = [](const SDValue &Op) -> bool {
+ for (const SDNode *UserNode : Op->users()) {
+ const unsigned Opcode = UserNode->getOpcode();
+ if (Opcode != LoongArchISD::SELECT_CC && Opcode != LoongArchISD::BR_CC)
+ return false;
+ }
+ return true;
+ };
+ auto isFoldableXorEq = [isXorImmediate, singleBitOp, onlyUsedBySelectOrBR](
+ const SDValue &LHS, const SDValue &RHS) -> bool {
+ return LHS.getOpcode() == ISD::XOR && isNullConstant(RHS) &&
+ (!isXorImmediate(LHS.getOperand(1)) ||
+ singleBitOp(LHS.getOperand(0), LHS.getOperand(1)) ||
+ onlyUsedBySelectOrBR(LHS));
+ };
+ // Fold ((xor X, Y), 0, eq/ne) -> (X, Y, eq/ne)
+ if (isFoldableXorEq(LHS, RHS)) {
+ RHS = LHS.getOperand(1);
+ LHS = LHS.getOperand(0);
+ return true;
+ }
+  // Fold ((sext (xor X, C)), 0, eq/ne) -> ((sext X), C, eq/ne)
+ if (LHS.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+ const SDValue LHS0 = LHS.getOperand(0);
+ if (isFoldableXorEq(LHS0, RHS) && isa<ConstantSDNode>(LHS0.getOperand(1))) {
+      // SEXT(XOR(X, Y)) -> XOR(SEXT(X), SEXT(Y))
+ RHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, LHS.getValueType(),
+ LHS0.getOperand(1), LHS.getOperand(1));
+ LHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, LHS.getValueType(),
+ LHS0.getOperand(0), LHS.getOperand(1));
+ return true;
+ }
+ }
+
+ // Fold ((srl (and X, 1<<C), C), 0, eq/ne) -> ((shl X, GRLen-1-C), 0, ge/lt)
+ if (isNullConstant(RHS) && LHS.getOpcode() == ISD::SRL && LHS.hasOneUse() &&
+ LHS.getOperand(1).getOpcode() == ISD::Constant) {
+ SDValue LHS0 = LHS.getOperand(0);
+ if (LHS0.getOpcode() == ISD::AND &&
+ LHS0.getOperand(1).getOpcode() == ISD::Constant) {
+ uint64_t Mask = LHS0.getConstantOperandVal(1);
+ uint64_t ShAmt = LHS.getConstantOperandVal(1);
+ if (isPowerOf2_64(Mask) && Log2_64(Mask) == ShAmt) {
+ CCVal = CCVal == ISD::SETEQ ? ISD::SETGE : ISD::SETLT;
+ CC = DAG.getCondCode(CCVal);
+
+ ShAmt = LHS.getValueSizeInBits() - 1 - ShAmt;
+ LHS = LHS0.getOperand(0);
+ if (ShAmt != 0)
+ LHS =
+ DAG.getNode(ISD::SHL, DL, LHS.getValueType(), LHS0.getOperand(0),
+ DAG.getConstant(ShAmt, DL, LHS.getValueType()));
+ return true;
+ }
+ }
+ }
+
+ // (X, 1, setne) -> (X, 0, seteq) if we can prove X is 0/1.
+ // This can occur when legalizing some floating point comparisons.
+ APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
+ if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
+ CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
+ CC = DAG.getCondCode(CCVal);
+ RHS = DAG.getConstant(0, DL, LHS.getValueType());
+ return true;
+ }
+
+ return false;
+}
+
+static SDValue performBR_CCCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const LoongArchSubtarget &Subtarget) {
+ SDValue LHS = N->getOperand(1);
+ SDValue RHS = N->getOperand(2);
+ SDValue CC = N->getOperand(3);
+ SDLoc DL(N);
+
+ if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
+ return DAG.getNode(LoongArchISD::BR_CC, DL, N->getValueType(0),
+ N->getOperand(0), LHS, RHS, CC, N->getOperand(4));
+
+ return SDValue();
+}
+
template <unsigned N>
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp,
SelectionDAG &DAG,
@@ -5712,6 +5883,8 @@ SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
return performBITCASTCombine(N, DAG, DCI, Subtarget);
case LoongArchISD::BITREV_W:
return performBITREV_WCombine(N, DAG, DCI, Subtarget);
+ case LoongArchISD::BR_CC:
+ return performBR_CCCombine(N, DAG, DCI, Subtarget);
case ISD::INTRINSIC_WO_CHAIN:
return performINTRINSIC_WO_CHAINCombine(N, DAG, DCI, Subtarget);
case LoongArchISD::MOVGR2FR_W_LA64:
@@ -6435,6 +6608,8 @@ const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(TAIL_MEDIUM)
NODE_NAME_CASE(TAIL_LARGE)
NODE_NAME_CASE(SELECT_CC)
+ NODE_NAME_CASE(BR_CC)
+ NODE_NAME_CASE(BRCOND)
NODE_NAME_CASE(SLL_W)
NODE_NAME_CASE(SRA_W)
NODE_NAME_CASE(SRL_W)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 60dc2b385a75c..3f849ef05845b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -37,6 +37,10 @@ enum NodeType : unsigned {
// Select
SELECT_CC,
+ // Branch
+ BR_CC,
+ BRCOND,
+
// 32-bit shifts, directly matching the semantics of the named LoongArch
// instructions.
SLL_W,
@@ -381,6 +385,7 @@ class LoongArchTargetLowering : public TargetLowering {
SDValue lowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFP_TO_BF16(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 2b94e65cac0e5..20ccc622f58dc 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -31,6 +31,10 @@ def SDT_LoongArchSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
SDTCisSameAs<0, 4>,
SDTCisSameAs<4, 5>]>;
+def SDT_LoongArchBrCC : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
+ SDTCisVT<2, OtherVT>,
+ SDTCisVT<3, OtherVT>]>;
+
def SDT_LoongArchBStrIns: SDTypeProfile<1, 4, [
SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<3>,
SDTCisSameAs<3, 4>
@@ -94,6 +98,8 @@ def loongarch_tail_large : SDNode<"LoongArchISD::TAIL_LARGE", SDT_LoongArchCall,
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
SDNPVariadic]>;
def loongarch_selectcc : SDNode<"LoongArchISD::SELECT_CC", SDT_LoongArchSelectCC>;
+def loongarch_brcc : SDNode<"LoongArchISD::BR_CC", SDT_LoongArchBrCC,
+ [SDNPHasChain]>;
def loongarch_sll_w : SDNode<"LoongArchISD::SLL_W", SDT_LoongArchIntBinOpW>;
def loongarch_sra_w : SDNode<"LoongArchISD::SRA_W", SDT_LoongArchIntBinOpW>;
def loongarch_srl_w : SDNode<"LoongArchISD::SRL_W", SDT_LoongArchIntBinOpW>;
@@ -1537,47 +1543,29 @@ def : Pat<(select GPR:$cond, GPR:$t, GPR:$f),
/// Branches and jumps
-class BccPat<PatFrag CondOp, LAInst Inst>
- : Pat<(brcond (GRLenVT (CondOp GPR:$rj, GPR:$rd)), bb:$imm16),
- (Inst GPR:$rj, GPR:$rd, bb:$imm16)>;
-
-def : BccPat<seteq, BEQ>;
-def : BccPat<setne, BNE>;
-def : BccPat<setlt, BLT>;
-def : BccPat<setge, BGE>;
-def : BccPat<setult, BLTU>;
-def : BccPat<setuge, BGEU>;
-
-class BccSwapPat<PatFrag CondOp, LAInst InstBcc>
- : Pat<(brcond (GRLenVT (CondOp GPR:$rd, GPR:$rj)), bb:$imm16),
- (InstBcc GPR:$rj, GPR:$rd, bb:$imm16)>;
-
-// Condition codes that don't have matching LoongArch branch instructions, but
-// are trivially supported by swapping the two input operands.
-def : BccSwapPat<setgt, BLT>;
-def : BccSwapPat<setle, BGE>;
-def : BccSwapPat<setugt, BLTU>;
-def : BccSwapPat<setule, BGEU>;
-
let Predicates = [Has32S] in {
-// An extra pattern is needed for a brcond without a setcc (i.e. where the
-// condition was calculated elsewhere).
-def : Pat<(brcond GPR:$rj, bb:$imm21), (BNEZ GPR:$rj, bb:$imm21)>;
-
-def : Pat<(brcond (GRLenVT (seteq GPR:$rj, 0)), bb:$imm21),
- (BEQZ GPR:$rj, bb:$imm21)>;
-def : Pat<(brcond (GRLenVT (setne GPR:$rj, 0)), bb:$imm21),
- (BNEZ GPR:$rj, bb:$imm21)>;
+class BccZeroPat<CondCode Cond, LAInst Inst>
+ : Pat<(loongarch_brcc (GRLenVT GPR:$rj), 0, Cond, bb:$imm21),
+ (Inst GPR:$rj, bb:$imm21)>;
+
+def : BccZeroPat<SETEQ, BEQZ>;
+def : BccZeroPat<SETNE, BNEZ>;
} // Predicates = [Has32S]
-// An extra pattern is needed for a brcond without a setcc (i.e. where the
-// condition was calculated elsewhere).
-def : Pat<(brcond GPR:$rj, bb:$imm16), (BNE GPR:$rj, R0, bb:$imm16)>;
+multiclass BccPat<CondCode Cond, LAInst Inst> {
+ def : Pat<(loongarch_brcc (GRLenVT GPR:$rj), GPR:$rd, Cond, bb:$imm16),
+ (Inst GPR:$rj, GPR:$rd, bb:$imm16)>;
+ // Explicitly select 0 to R0. The register coalescer doesn't always do it.
+ def : Pat<(loongarch_brcc (GRLenVT GPR:$rj), 0, Cond, bb:$imm16),
+ (Inst GPR:$rj, (GRLenVT R0), bb:$imm16)>;
+}
-def : Pat<(brcond (GRLenVT (seteq GPR:$rj, 0)), bb:$imm16),
- (BEQ GPR:$rj, R0, bb:$imm16)>;
-def : Pat<(brcond (GRLenVT (setne GPR:$rj, 0)), bb:$imm16),
- (BNE GPR:$rj, R0, bb:$imm16)>;
+defm : BccPat<SETEQ, BEQ>;
+defm : BccPat<SETNE, BNE>;
+defm : BccPat<SETLT, BLT>;
+defm : BccPat<SETGE, BGE>;
+defm : BccPat<SETULT, BLTU>;
+defm : BccPat<SETUGE, BGEU>;
let isBarrier = 1, isBranch = 1, isTerminator = 1 in
def PseudoBR : Pseudo<(outs), (ins simm26_b:$imm26), [(br bb:$imm26)]>,
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
index cff3484934214..713af3fd9c84d 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-dbl.ll
@@ -263,8 +263,7 @@ define i1 @fcmp_fast_olt(double %a, double %b, i1 %c) nounwind {
; LA32-NEXT: movgr2fr.w $fa1, $zero
; LA32-NEXT: movgr2frh.w $fa1, $zero
; LA32-NEXT: fcmp.cle.d $fcc0, $fa1, $fa0
-; LA32-NEXT: movcf2gr $a1, $fcc0
-; LA32-NEXT: bnez $a1, .LBB16_2
+; LA32-NEXT: bcnez $fcc0, .LBB16_2
; LA32-NEXT: # %bb.1: # %if.then
; LA32-NEXT: ret
; LA32-NEXT: .LBB16_2: # %if.else
@@ -276,8 +275,7 @@ define i1 @fcmp_fast_olt(double %a, double %b, i1 %c) nounwind {
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.d $fa1, $zero
; LA64-NEXT: fcmp.cle.d $fcc0, $fa1, $fa0
-; LA64-NEXT: movcf2gr $a1, $fcc0
-; LA64-NEXT: bnez $a1, .LBB16_2
+; LA64-NEXT: bcnez $fcc0, .LBB16_2
; LA64-NEXT: # %bb.1: # %if.then
; LA64-NEXT: ret
; LA64-NEXT: .LBB16_2: # %if.else
@@ -300,9 +298,7 @@ define i1 @fcmp_fast_oeq(double %a, double %b, i1 %c) nounwind {
; LA32-NEXT: movgr2fr.w $fa1, $zero
; LA32-NEXT: movgr2frh.w $fa1, $zero
; LA32-NEXT: fcmp.ceq.d $fcc0, $fa0, $fa1
-; LA32-NEXT: movcf2gr $a1, $fcc0
-; LA32-NEXT: xori $a1, $a1, 1
-; LA32-NEXT: bnez $a1, .LBB17_2
+; LA32-NEXT: bceqz $fcc0, .LBB17_2
; LA32-NEXT: # %bb.1: # %if.then
; LA32-NEXT: ret
; LA32-NEXT: .LBB17_2: # %if.else
@@ -313,9 +309,7 @@ define i1 @fcmp_fast_oeq(double %a, double %b, i1 %c) nounwind {
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.d $fa1, $zero
; LA64-NEXT: fcmp.ceq.d $fcc0, $fa0, $fa1
-; LA64-NEXT: movcf2gr $a1, $fcc0
-; LA64-NEXT: xori $a1, $a1, 1
-; LA64-NEXT: bnez $a1, .LBB17_2
+; LA64-NEXT: bceqz $fcc0, .LBB17_2
; LA64-NEXT: # %bb.1: # %if.then
; LA64-NEXT: ret
; LA64-NEXT: .LBB17_2: # %if.else
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
index 8b682ecac50f5..4a97f693fafd7 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fcmp-flt.ll
@@ -262,8 +262,7 @@ define i1 @fcmp_fast_olt(float %a, float %b, i1 %c) nounwind {
; LA32: # %bb.0:
; LA32-NEXT: movgr2fr.w $fa1, $zero
; LA32-NEXT: fcmp.cle.s $fcc0, $fa1, $fa0
-; LA32-NEXT: movcf2gr $a1, $fcc0
-; LA32-NEXT: bnez $a1, .LBB16_2
+; LA32-NEXT: bcnez $fcc0, .LBB16_2
; LA32-NEXT: # %bb.1: # %if.then
; LA32-NEXT: ret
; LA32-NEXT: .LBB16_2: # %if.else
@@ -275,8 +274,7 @@ define i1 @fcmp_fast_olt(float %a, float %b, i1 %c) nounwind {
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.w $fa1, $zero
; LA64-NEXT: fcmp.cle.s $fcc0, $fa1, $fa0
-; LA64-NEXT: movcf2gr $a1, $fcc0
-; LA64-NEXT: bnez $a1, .LBB16_2
+; LA64-NEXT: bcnez $fcc0, .LBB16_2
; LA64-NEXT: # %bb.1: # %if.then
; LA64-NEXT: ret
; LA64-NEXT: .LBB16_2: # %if.else
@@ -298,9 +296,7 @@ define i1 @fcmp_fast_oeq(float %a, float %b, i1 %c) nounwind {
; LA32: # %bb.0:
; LA32-NEXT: movgr2fr.w $fa1, $zero
; LA32-NEXT: fcmp.ceq.s $fcc0, $fa0, $fa1
-; LA32-NEXT: movcf2gr $a1, $fcc0
-; LA32-NEXT: xori $a1, $a1, 1
-; LA32-NEXT: bnez $a1, .LBB17_2
+; LA32-NEXT: bceqz $fcc0, .LBB17_2
; LA32-NEXT: # %bb.1: # %if.then
; LA32-NEXT: ret
; LA32-NEXT: .LBB17_2: # %if.else
@@ -311,9 +307,7 @@ define i1 @fcmp_fast_oeq(float %a, float %b, i1 %c) nounwind {
; LA64: # %bb.0:
; LA64-NEXT: movgr2fr.w $fa1, $zero
; LA64-NEXT: fcmp.ceq.s $fcc0, $fa0, $fa1
-; LA64-NEXT: movcf2gr $a1, $fcc0
-; LA64-NEXT: xori $a1, $a1, 1
-; LA64-NEXT: bnez $a1, .LBB17_2
+; LA64-NEXT: bceqz $fcc0, .LBB17_2
; LA64-NEXT: # %bb.1: # %if.then
; LA64-NEXT: ret
; LA64-NEXT: .LBB17_2: # %if.else
diff --git a/llvm/test/CodeGen/LoongArch/merge-base-offset-tlsle.ll b/llvm/test/CodeGen/LoongArch/merge-base-offset-tlsle.ll
index 9ed9a865ce55d..97d33379913e5 100644
--- a/llvm/test/CodeGen/LoongArch/merge-base-offset-tlsle.ll
+++ b/llvm/test/CodeGen/LoongArch/merge-base-offset-tlsle.ll
@@ -630,8 +630,7 @@ define dso_local void @tlsle_control_flow_with_mem_access() nounwind {
; LA32-NEXT: lu12i.w $a0, %le_hi20_r(g_a32+4)
; LA32-NEXT: add.w $a0, $a0, $tp, %le_add_r(g_a32+4)
; LA32-NEXT: ld.w $a1, $a0, %le_lo12_r(g_a32+4)
-; LA32-NEXT: ori $a2, $zero, 1
-; LA32-NEXT: blt $a1, $a2, .LBB25_2
+; LA32-NEXT: blez $a1, .LBB25_2
; LA32-NEXT: # %bb.1: # %if.then
; LA32-NEXT: ori $a1, $zero, 10
; LA32-NEXT: st.w $a1, $a0, %le_lo12_r(g_a32+4)
@@ -643,8 +642,7 @@ define dso_local void @tlsle_control_flow_with_mem_access() nounwind {
; LA64-NEXT: lu12i.w $a0, %le_hi20_r(g_a32+4)
; LA64-NEXT: add.d $a0, $a0, $tp, %le_add_r(g_a32+4)
; LA64-NEXT: ld.w $a1, $a0, %le_lo12_r(g_a32+4)
-; LA64-NEXT: ori $a2, $zero, 1
-; LA64-NEXT: blt $a1, $a2, .LBB25_2
+; LA64-NEXT: blez $a1, .LBB25_2
; LA64-NEXT: # %bb.1: # %if.then
; LA64-NEXT: ori $a1, $zero, 10
; LA64-NEXT: st.w $a1, $a0, %le_lo12_r(g_a32+4)
diff --git a/llvm/test/CodeGen/LoongArch/merge-base-offset.ll b/llvm/test/CodeGen/LoongArch/merge-base-offset.ll
index 2af206699d4ad..1151c77c9af76 100644
--- a/llvm/test/CodeGen/LoongArch/merge-base-offset.ll
+++ b/llvm/test/CodeGen/LoongArch/merge-base-offset.ll
@@ -811,8 +811,7 @@ define dso_local void @control_flow_with_mem_access() nounwind {
; LA32: # %bb.0: # %entry
; LA32-NEXT: pcalau12i $a0, %pc_hi20(g_a32+4)
; LA32-NEXT: ld.w $a1, $a0, %pc_lo12(g_a32+4)
-; LA32-NEXT: ori $a2, $zero, 1
-; LA32-NEXT: blt $a1, $a2, .LBB25_2
+; LA32-NEXT: blez $a1, .LBB25_2
; LA32-NEXT: # %bb.1: # %if.then
; LA32-NEXT: ori $a1, $zero, 10
; LA32-NEXT: st.w $a1, $a0, %pc_lo12(g_a32+4)
@@ -823,8 +822,7 @@ define dso_local void @control_flow_with_mem_access() nounwind {
; LA64: # %bb.0: # %entry
; LA64-NEXT: pcalau12i $a0, %pc_hi20(g_a32+4)
; LA64-NEXT: ld.w $a1, $a0, %pc_lo12(g_a32+4)
-; LA64-NEXT: ori $a2, $zero, 1
-; LA64-NEXT: blt $a1, $a2, .LBB25_2
+; LA64-NEXT: blez $a1, .LBB25_2
; LA64-NEXT: # %bb.1: # %if.then
; LA64-NEXT: ori $a1, $zero, 10
; LA64-NEXT: st.w $a1, $a0, %pc_lo12(g_a32+4)
@@ -838,8 +836,7 @@ define dso_local void @control_flow_with_mem_access() nounwind {
; LA64-LARGE-NEXT: lu32i.d $a1, %pc64_lo20(g_a32+4)
; LA64-LARGE-NEXT: lu52i.d $a1, $a1, %pc64_hi12(g_a32+4)
; LA64-LARGE-NEXT: ldx.w $a0, $a1, $a0
-; LA64-LARGE-NEXT: ori $a1, $zero, 1
-; LA64-LARGE-NEXT: blt $a0, $a1, .LBB25_2
+; LA64-LARGE-NEXT: blez $a0, .LBB25_2
; LA64-LARGE-NEXT: # %bb.1: # %if.then
; LA64-LARGE-NEXT: pcalau12i $a0, %pc_hi20(g_a32+4)
; LA64-LARGE-NEXT: addi.d $a1, $zero, %pc_lo12(g_a32+4)
diff --git a/llvm/test/CodeGen/LoongArch/preferred-alignments.ll b/llvm/test/CodeGen/LoongArch/preferred-alignments.ll
index c3618db646016..0f81f860025df 100644
--- a/llvm/test/CodeGen/LoongArch/preferred-alignments.ll
+++ b/llvm/test/CodeGen/LoongArch/preferred-alignments.ll
@@ -5,10 +5,9 @@
define signext i32 @sum(ptr noalias nocapture noundef readonly %0, i32 noundef signext %1) {
; LA464-LABEL: sum:
; LA464: # %bb.0:
-; LA464-NEXT: ori $a2, $zero, 1
-; LA464-NEXT: blt $a1, $a2, .LBB0_4
-; LA464-NEXT: # %bb.1:
; LA464-NEXT: move $a2, $zero
+; LA464-NEXT: blez $a1, .LBB0_3
+; LA464-NEXT: # %bb.1:
; LA464-NEXT: bstrpick.d $a1, $a1, 31, 0
; LA464-NEXT: .p2align 4, , 16
; LA464-NEXT: .LBB0_2: # =>This Inner Loop Header: Depth=1
@@ -17,11 +16,7 @@ define signext i32 @sum(ptr noalias nocapture noundef readonly %0, i32 noundef s
; LA464-NEXT: addi.d $a1, $a1, -1
; LA464-NEXT: addi.d $a0, $a0, 4
; LA464-NEXT: bnez $a1, .LBB0_2
-; LA464-NEXT: # %bb.3:
-; LA464-NEXT: move $a0, $a2
-; LA464-NEXT: ret
-; LA464-NEXT: .LBB0_4:
-; LA464-NEXT: move $a2, $zero
+; LA464-NEXT: .LBB0_3:
; LA464-NEXT: move $a0, $a2
; LA464-NEXT: ret
%3 = icmp sgt i32 %1, 0
>From 8ca3f38632d1cea10d0a7c555ca8e91f04251818 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Fri, 18 Jul 2025 11:40:27 +0800
Subject: [PATCH 2/4] Address weining's comments
---
llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td | 2 ++
1 file changed, 2 insertions(+)
diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
index 99205fbe023c8..c8b7edd75caee 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -25,6 +25,8 @@ def SDT_LoongArchFTINT : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>;
def SDT_LoongArchFRECIPE : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>;
def SDT_LoongArchFRSQRTE : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>;
+// ISD::BRCOND is custom-lowered to LoongArchISD::BRCOND for floating-point
+// comparisons to prevent recursive lowering.
def loongarch_brcond : SDNode<"LoongArchISD::BRCOND", SDTBrcond, [SDNPHasChain]>;
def loongarch_movgr2fr_w_la64
: SDNode<"LoongArchISD::MOVGR2FR_W_LA64", SDT_LoongArchMOVGR2FR_W_LA64>;
>From fd0e9f6d7a3407e92d3e479230536b2a1ea8e811 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Sat, 2 Aug 2025 11:38:01 +0800
Subject: [PATCH 3/4] [LoongArch][NFC] Pre-commit for conditional branch
optimization
---
llvm/test/CodeGen/LoongArch/bittest.ll | 3366 +++++++++++++++++++
llvm/test/CodeGen/LoongArch/select-const.ll | 25 +
2 files changed, 3391 insertions(+)
create mode 100644 llvm/test/CodeGen/LoongArch/bittest.ll
diff --git a/llvm/test/CodeGen/LoongArch/bittest.ll b/llvm/test/CodeGen/LoongArch/bittest.ll
new file mode 100644
index 0000000000000..210e4edbb38ff
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/bittest.ll
@@ -0,0 +1,3366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 -mattr=+d < %s | FileCheck %s --check-prefixes=CHECK,LA32
+; RUN: llc --mtriple=loongarch64 -mattr=+d < %s | FileCheck %s --check-prefixes=CHECK,LA64
+
+define signext i32 @bittest_7_i32(i32 signext %a) nounwind {
+; LA32-LABEL: bittest_7_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 7
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_7_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 7, 7
+; LA64-NEXT: ret
+ %shr = lshr i32 %a, 7
+ %not = xor i32 %shr, -1
+ %and = and i32 %not, 1
+ ret i32 %and
+}
+
+define signext i32 @bittest_10_i32(i32 signext %a) nounwind {
+; LA32-LABEL: bittest_10_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 10
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_10_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 10, 10
+; LA64-NEXT: ret
+ %shr = lshr i32 %a, 10
+ %not = xor i32 %shr, -1
+ %and = and i32 %not, 1
+ ret i32 %and
+}
+
+define signext i32 @bittest_11_i32(i32 signext %a) nounwind {
+; LA32-LABEL: bittest_11_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 11
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_11_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 11, 11
+; LA64-NEXT: ret
+ %shr = lshr i32 %a, 11
+ %not = xor i32 %shr, -1
+ %and = and i32 %not, 1
+ ret i32 %and
+}
+
+define signext i32 @bittest_31_i32(i32 signext %a) nounwind {
+; LA32-LABEL: bittest_31_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 31
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_31_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 31
+; LA64-NEXT: ret
+ %shr = lshr i32 %a, 31
+ %not = xor i32 %shr, -1
+ %and = and i32 %not, 1
+ ret i32 %and
+}
+
+define i64 @bittest_7_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_7_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 7
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_7_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 7, 7
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 7
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_10_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_10_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 10
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_10_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 10, 10
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 10
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_11_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_11_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 11
+; LA32-NEXT: andi $a0, $a0, 1
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_11_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 11, 11
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 11
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_31_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_31_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a0, $zero
+; LA32-NEXT: srli.w $a0, $a0, 31
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_31_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 31
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 31
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_32_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_32_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: ori $a0, $zero, 1
+; LA32-NEXT: andn $a0, $a0, $a1
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_32_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: bstrpick.d $a0, $a0, 32, 32
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 32
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i64 @bittest_63_i64(i64 %a) nounwind {
+; LA32-LABEL: bittest_63_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: nor $a0, $a1, $zero
+; LA32-NEXT: srli.w $a0, $a0, 31
+; LA32-NEXT: move $a1, $zero
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_63_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: nor $a0, $a0, $zero
+; LA64-NEXT: srli.d $a0, $a0, 63
+; LA64-NEXT: ret
+ %shr = lshr i64 %a, 63
+ %not = xor i64 %shr, -1
+ %and = and i64 %not, 1
+ ret i64 %and
+}
+
+define i1 @bittest_constant_by_var_shr_i32(i32 signext %b) nounwind {
+; CHECK-LABEL: bittest_constant_by_var_shr_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lu12i.w $a1, 301408
+; CHECK-NEXT: ori $a1, $a1, 722
+; CHECK-NEXT: srl.w $a0, $a1, $a0
+; CHECK-NEXT: andi $a0, $a0, 1
+; CHECK-NEXT: ret
+ %shl = lshr i32 1234567890, %b
+ %and = and i32 %shl, 1
+ %cmp = icmp ne i32 %and, 0
+ ret i1 %cmp
+}
+
+define i1 @bittest_constant_by_var_shl_i32(i32 signext %b) nounwind {
+; CHECK-LABEL: bittest_constant_by_var_shl_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ori $a1, $zero, 1
+; CHECK-NEXT: sll.w $a0, $a1, $a0
+; CHECK-NEXT: lu12i.w $a1, 301408
+; CHECK-NEXT: ori $a1, $a1, 722
+; CHECK-NEXT: and $a0, $a0, $a1
+; CHECK-NEXT: sltu $a0, $zero, $a0
+; CHECK-NEXT: ret
+ %shl = shl i32 1, %b
+ %and = and i32 %shl, 1234567890
+ %cmp = icmp ne i32 %and, 0
+ ret i1 %cmp
+}
+
+define i1 @bittest_constant_by_var_shr_i64(i64 %b) nounwind {
+; LA32-LABEL: bittest_constant_by_var_shr_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 301408
+; LA32-NEXT: ori $a1, $a1, 722
+; LA32-NEXT: srl.w $a1, $a1, $a0
+; LA32-NEXT: addi.w $a0, $a0, -32
+; LA32-NEXT: slti $a0, $a0, 0
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_constant_by_var_shr_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 301408
+; LA64-NEXT: ori $a1, $a1, 722
+; LA64-NEXT: srl.d $a0, $a1, $a0
+; LA64-NEXT: andi $a0, $a0, 1
+; LA64-NEXT: ret
+ %shl = lshr i64 1234567890, %b
+ %and = and i64 %shl, 1
+ %cmp = icmp ne i64 %and, 0
+ ret i1 %cmp
+}
+
+define i1 @bittest_constant_by_var_shl_i64(i64 %b) nounwind {
+; LA32-LABEL: bittest_constant_by_var_shl_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $a1, $a0, -32
+; LA32-NEXT: slti $a1, $a1, 0
+; LA32-NEXT: sub.w $a1, $zero, $a1
+; LA32-NEXT: ori $a2, $zero, 1
+; LA32-NEXT: sll.w $a0, $a2, $a0
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: lu12i.w $a1, 301408
+; LA32-NEXT: ori $a1, $a1, 722
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: sltu $a0, $zero, $a0
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_constant_by_var_shl_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: sll.d $a0, $a1, $a0
+; LA64-NEXT: lu12i.w $a1, 301408
+; LA64-NEXT: ori $a1, $a1, 722
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: ret
+ %shl = shl i64 1, %b
+ %and = and i64 %shl, 1234567890
+ %cmp = icmp ne i64 %and, 0
+ ret i1 %cmp
+}
+
+define void @bittest_switch(i32 signext %0) {
+; LA32-LABEL: bittest_switch:
+; LA32: # %bb.0:
+; LA32-NEXT: ori $a1, $zero, 31
+; LA32-NEXT: bltu $a1, $a0, .LBB14_3
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ori $a1, $zero, 1
+; LA32-NEXT: sll.w $a0, $a1, $a0
+; LA32-NEXT: lu12i.w $a1, -524285
+; LA32-NEXT: ori $a1, $a1, 768
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB14_3
+; LA32-NEXT: # %bb.2:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB14_3:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bittest_switch:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a1, $zero, 31
+; LA64-NEXT: bltu $a1, $a0, .LBB14_3
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ori $a1, $zero, 1
+; LA64-NEXT: sll.d $a0, $a1, $a0
+; LA64-NEXT: lu12i.w $a1, -524285
+; LA64-NEXT: ori $a1, $a1, 768
+; LA64-NEXT: lu32i.d $a1, 0
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB14_3
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB14_3:
+; LA64-NEXT: ret
+ switch i32 %0, label %3 [
+ i32 8, label %2
+ i32 9, label %2
+ i32 12, label %2
+ i32 13, label %2
+ i32 31, label %2
+ ]
+
+2:
+ tail call void @bar()
+ br label %3
+
+3:
+ ret void
+}
+
+declare void @bar()
+
+define signext i32 @bit_10_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_10_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 1024
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB15_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB15_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1024
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_10_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_10_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: srli.w $a3, $a0, 10
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB16_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB16_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 10, 10
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1024
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_11_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_11_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 20
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bgez $a3, .LBB17_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB17_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2048
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_11_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_11_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: srli.w $a3, $a0, 11
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB18_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB18_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 11, 11
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2048
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_20_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_20_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 11
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bgez $a3, .LBB19_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB19_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a3, 256
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1048576
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_20_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_20_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a3, 256
+; LA32-NEXT: and $a0, $a0, $a3
+; LA32-NEXT: srli.w $a3, $a0, 20
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB20_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB20_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 20, 20
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1048576
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_31_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_31_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: bgez $a0, .LBB21_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a1, $a2
+; LA32-NEXT: .LBB21_2:
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 30, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2147483648
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_31_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_31_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a3, $a0, 31
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB22_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB22_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 30, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2147483648
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define i64 @bit_10_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_10_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 1024
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB23_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB23_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1024
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_10_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_10_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a0, $a0, 10
+; LA32-NEXT: andi $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB24_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB24_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 10, 10
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1024
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_11_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_11_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 20
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bgez $a6, .LBB25_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB25_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2048
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_11_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_11_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a0, $a0, 11
+; LA32-NEXT: andi $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB26_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB26_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 11, 11
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2048
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_20_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_20_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 11
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bgez $a6, .LBB27_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB27_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a3, 256
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1048576
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_20_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_20_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a0, $a0, 20
+; LA32-NEXT: andi $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB28_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB28_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 20, 20
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1048576
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_31_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_31_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: bgez $a0, .LBB29_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a2, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB29_2:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a3, -524288
+; LA64-NEXT: lu32i.d $a3, 0
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2147483648
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_31_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_31_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a6, $a0, 31
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB30_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB30_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 31
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2147483648
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_32_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_32_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a1, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB31_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB31_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a3, $zero, 0
+; LA64-NEXT: lu32i.d $a3, 1
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 4294967296
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_32_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_32_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a1, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB32_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB32_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 32, 32
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 4294967296
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_55_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_55_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a1, 8
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bgez $a6, .LBB33_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB33_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_55_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu52i.d $a3, $zero, 8
+; LA64-NEXT: and $a0, $a0, $a3
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 36028797018963968
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_55_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_55_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a0, $a1, 23
+; LA32-NEXT: andi $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB34_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB34_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_55_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 55, 55
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 36028797018963968
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_63_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_63_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bgez $a1, .LBB35_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a3, $a5
+; LA32-NEXT: .LBB35_2:
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 62, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 9223372036854775808
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_63_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_63_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: srli.w $a6, $a1, 31
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB36_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB36_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: srli.d $a0, $a0, 63
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 9223372036854775808
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define void @bit_10_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_10_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: bne $a0, $zero, .LBB37_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB37_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: bnez $a0, .LBB37_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB37_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 1024
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_10_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: beq $a0, $zero, .LBB38_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB38_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: beqz $a0, .LBB38_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB38_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 1024
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_11_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: bne $a0, $zero, .LBB39_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB39_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: bnez $a0, .LBB39_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB39_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2048
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_11_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: beq $a0, $zero, .LBB40_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB40_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: beqz $a0, .LBB40_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB40_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2048
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_24_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4096
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: bne $a0, $zero, .LBB41_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB41_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 4096
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB41_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB41_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 16777216
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_24_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4096
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB42_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB42_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 4096
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB42_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB42_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 16777216
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_31_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, -524288
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: bne $a0, $zero, .LBB43_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB43_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 30, 0
+; LA64-NEXT: bnez $a0, .LBB43_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB43_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2147483648
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_31_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, -524288
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB44_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB44_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 30, 0
+; LA64-NEXT: beqz $a0, .LBB44_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB44_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2147483648
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_10_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: bne $a0, $zero, .LBB45_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB45_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: bnez $a0, .LBB45_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB45_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 1024
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_10_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1024
+; LA32-NEXT: beq $a0, $zero, .LBB46_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB46_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1024
+; LA64-NEXT: beqz $a0, .LBB46_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB46_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 1024
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_11_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: bne $a0, $zero, .LBB47_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB47_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: bnez $a0, .LBB47_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB47_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2048
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_11_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2048
+; LA32-NEXT: beq $a0, $zero, .LBB48_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB48_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2048
+; LA64-NEXT: beqz $a0, .LBB48_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB48_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2048
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_24_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4096
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: bne $a0, $zero, .LBB49_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB49_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 4096
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB49_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB49_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 16777216
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_24_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4096
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB50_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB50_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, 4096
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB50_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB50_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 16777216
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_31_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, -524288
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: bne $a0, $zero, .LBB51_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB51_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, -524288
+; LA64-NEXT: lu32i.d $a1, 0
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB51_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB51_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2147483648
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_31_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, -524288
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB52_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB52_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu12i.w $a1, -524288
+; LA64-NEXT: lu32i.d $a1, 0
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB52_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB52_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2147483648
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_32_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a1, 1
+; LA32-NEXT: bne $a0, $zero, .LBB53_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB53_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a1, $zero, 0
+; LA64-NEXT: lu32i.d $a1, 1
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB53_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB53_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4294967296
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_32_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a1, 1
+; LA32-NEXT: beq $a0, $zero, .LBB54_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB54_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ori $a1, $zero, 0
+; LA64-NEXT: lu32i.d $a1, 1
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB54_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB54_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4294967296
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_62_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_62_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a0, 262144
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: bne $a0, $zero, .LBB55_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB55_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_62_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu52i.d $a1, $zero, 1024
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: bnez $a0, .LBB55_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB55_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4611686018427387904
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_62_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_62_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a0, 262144
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: beq $a0, $zero, .LBB56_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB56_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_62_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: lu52i.d $a1, $zero, 1024
+; LA64-NEXT: and $a0, $a0, $a1
+; LA64-NEXT: beqz $a0, .LBB56_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB56_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4611686018427387904
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_63_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_63_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a0, -524288
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: bne $a0, $zero, .LBB57_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB57_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 62, 0
+; LA64-NEXT: bnez $a0, .LBB57_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB57_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 9223372036854775808
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_63_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_63_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a0, -524288
+; LA32-NEXT: and $a0, $a1, $a0
+; LA32-NEXT: beq $a0, $zero, .LBB58_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB58_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrins.d $a0, $zero, 62, 0
+; LA64-NEXT: beqz $a0, .LBB58_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB58_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 9223372036854775808
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define signext i32 @bit_10_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_10_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 1023
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB59_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB59_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1023
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_10_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_10_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 1023
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB60_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB60_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1023
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_11_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_11_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 2047
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB61_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB61_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2047
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_11_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_11_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a3, $a0, 2047
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB62_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB62_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2047
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_16_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_16_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 16
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB63_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB63_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 65535
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_16_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_16_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 16
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB64_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB64_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 65535
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_20_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_20_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 12
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB65_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB65_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 19, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1048575
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_20_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_20_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 12
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB66_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB66_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 19, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 1048575
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_31_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_31_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 1
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: beq $a3, $zero, .LBB67_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB67_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2147483647
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_31_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_31_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a3, $a0, 1
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: bne $a3, $zero, .LBB68_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: .LBB68_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 2147483647
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_32_1_z_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_32_1_z_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB69_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a1, $a2
+; LA32-NEXT: .LBB69_2:
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_z_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 4294967295
+ %2 = icmp eq i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define signext i32 @bit_32_1_nz_select_i32(i32 signext %a, i32 signext %b, i32 signext %c) {
+; LA32-LABEL: bit_32_1_nz_select_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: bne $a0, $zero, .LBB70_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a1, $a2
+; LA32-NEXT: .LBB70_2:
+; LA32-NEXT: move $a0, $a1
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_nz_select_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i32 %a, 4294967295
+ %2 = icmp ne i32 %1, 0
+ %3 = select i1 %2, i32 %b, i32 %c
+ ret i32 %3
+}
+
+define i64 @bit_10_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_10_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 1023
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB71_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB71_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1023
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_10_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_10_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 1023
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB72_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB72_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1023
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_11_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_11_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 2047
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB73_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB73_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2047
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_11_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_11_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a6, $a0, 2047
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB74_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB74_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2047
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_16_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_16_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 16
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB75_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB75_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 65535
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_16_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_16_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: bne $a0, $zero, .LBB76_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a2, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB76_2:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 4294967295
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_20_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_20_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 12
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB77_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB77_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 19, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1048575
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_20_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_20_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 12
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB78_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB78_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_20_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 19, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 1048575
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_31_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_31_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB79_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB79_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2147483647
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_31_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_31_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: slli.w $a6, $a0, 1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB80_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB80_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 2147483647
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_32_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_32_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: beq $a0, $zero, .LBB81_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a2, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB81_2:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 4294967295
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_32_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_32_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: bne $a0, $zero, .LBB82_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a2, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB82_2:
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 4294967295
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_55_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_55_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a6, 2047
+; LA32-NEXT: ori $a6, $a6, 4095
+; LA32-NEXT: and $a1, $a1, $a6
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB83_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB83_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_55_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 54, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 36028797018963967
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_55_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_55_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a6, 2047
+; LA32-NEXT: ori $a6, $a6, 4095
+; LA32-NEXT: and $a1, $a1, $a6
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB84_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB84_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_55_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 54, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 36028797018963967
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_63_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_63_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a6, 524287
+; LA32-NEXT: ori $a6, $a6, 4095
+; LA32-NEXT: and $a1, $a1, $a6
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB85_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB85_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 62, 0
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 9223372036854775807
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_63_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_63_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a6, 524287
+; LA32-NEXT: ori $a6, $a6, 4095
+; LA32-NEXT: and $a1, $a1, $a6
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB86_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB86_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 62, 0
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 9223372036854775807
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_64_1_z_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_64_1_z_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: beq $a6, $zero, .LBB87_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB87_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_64_1_z_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: sltui $a0, $a0, 1
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 18446744073709551615
+ %2 = icmp eq i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define i64 @bit_64_1_nz_select_i64(i64 %a, i64 %b, i64 %c) {
+; LA32-LABEL: bit_64_1_nz_select_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: or $a6, $a0, $a1
+; LA32-NEXT: move $a1, $a3
+; LA32-NEXT: move $a0, $a2
+; LA32-NEXT: bne $a6, $zero, .LBB88_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: move $a0, $a4
+; LA32-NEXT: move $a1, $a5
+; LA32-NEXT: .LBB88_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_64_1_nz_select_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: sltu $a0, $zero, $a0
+; LA64-NEXT: masknez $a2, $a2, $a0
+; LA64-NEXT: maskeqz $a0, $a1, $a0
+; LA64-NEXT: or $a0, $a0, $a2
+; LA64-NEXT: ret
+ %1 = and i64 %a, 18446744073709551615
+ %2 = icmp ne i64 %1, 0
+ %3 = select i1 %2, i64 %b, i64 %c
+ ret i64 %3
+}
+
+define void @bit_10_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_10_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1023
+; LA32-NEXT: beq $a0, $zero, .LBB89_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB89_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_10_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: beqz $a0, .LBB89_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB89_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 1023
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_10_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1023
+; LA32-NEXT: beq $a0, $zero, .LBB90_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB90_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: beqz $a0, .LBB90_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB90_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 1023
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_11_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2047
+; LA32-NEXT: beq $a0, $zero, .LBB91_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB91_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_11_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: beqz $a0, .LBB91_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB91_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 2047
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_11_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2047
+; LA32-NEXT: beq $a0, $zero, .LBB92_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB92_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: beqz $a0, .LBB92_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB92_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2047
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_16_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_16_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 15
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB93_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB93_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_16_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: beqz $a0, .LBB93_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB93_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 65535
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_16_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_16_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 15
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB94_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB94_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: beqz $a0, .LBB94_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB94_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 65535
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_24_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4095
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB95_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB95_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_24_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 23, 0
+; LA64-NEXT: beqz $a0, .LBB95_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB95_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 16777215
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_24_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4095
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB96_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB96_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 23, 0
+; LA64-NEXT: beqz $a0, .LBB96_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB96_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 16777215
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_31_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 524287
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB97_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB97_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_31_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: beqz $a0, .LBB97_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB97_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 2147483647
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_31_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 524287
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB98_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB98_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: beqz $a0, .LBB98_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB98_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 2147483647
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_1_z_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_32_1_z_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB99_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB99_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_32_1_z_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: beqz $a0, .LBB99_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB99_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i32 %0, 4294967295
+ %3 = icmp eq i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_1_nz_branch_i32(i32 signext %0) {
+; LA32-LABEL: bit_32_1_nz_branch_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB100_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB100_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_nz_branch_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: beqz $a0, .LBB100_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB100_2:
+; LA64-NEXT: ret
+ %2 = and i32 %0, 4294967295
+ %3 = icmp ne i32 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_10_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1023
+; LA32-NEXT: beq $a0, $zero, .LBB101_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB101_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_10_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: beqz $a0, .LBB101_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB101_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 1023
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_10_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_10_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 1023
+; LA32-NEXT: beq $a0, $zero, .LBB102_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB102_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_10_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 1023
+; LA64-NEXT: beqz $a0, .LBB102_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB102_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 1023
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_11_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2047
+; LA32-NEXT: beq $a0, $zero, .LBB103_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB103_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_11_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: beqz $a0, .LBB103_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB103_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 2047
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_11_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_11_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: andi $a0, $a0, 2047
+; LA32-NEXT: beq $a0, $zero, .LBB104_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB104_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_11_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: andi $a0, $a0, 2047
+; LA64-NEXT: beqz $a0, .LBB104_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB104_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2047
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_16_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_16_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 15
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB105_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB105_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_16_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: beqz $a0, .LBB105_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB105_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 65535
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_16_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_16_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 15
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB106_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB106_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_16_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 15, 0
+; LA64-NEXT: beqz $a0, .LBB106_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB106_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 65535
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_24_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4095
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB107_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB107_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_24_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 23, 0
+; LA64-NEXT: beqz $a0, .LBB107_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB107_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 16777215
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_24_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_24_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 4095
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB108_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB108_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_24_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 23, 0
+; LA64-NEXT: beqz $a0, .LBB108_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB108_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 16777215
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_31_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 524287
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB109_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB109_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_31_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: beqz $a0, .LBB109_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB109_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 2147483647
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_31_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_31_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a1, 524287
+; LA32-NEXT: ori $a1, $a1, 4095
+; LA32-NEXT: and $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB110_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB110_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_31_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 30, 0
+; LA64-NEXT: beqz $a0, .LBB110_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB110_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 2147483647
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_32_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB111_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB111_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_32_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: beqz $a0, .LBB111_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB111_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 4294967295
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_32_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_32_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: beq $a0, $zero, .LBB112_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB112_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_32_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT: beqz $a0, .LBB112_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB112_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4294967295
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_62_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_62_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a2, 262143
+; LA32-NEXT: ori $a2, $a2, 4095
+; LA32-NEXT: and $a1, $a1, $a2
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB113_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB113_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_62_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 61, 0
+; LA64-NEXT: beqz $a0, .LBB113_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB113_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 4611686018427387903
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_62_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_62_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a2, 262143
+; LA32-NEXT: ori $a2, $a2, 4095
+; LA32-NEXT: and $a1, $a1, $a2
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB114_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB114_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_62_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 61, 0
+; LA64-NEXT: beqz $a0, .LBB114_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB114_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 4611686018427387903
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_63_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_63_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a2, 524287
+; LA32-NEXT: ori $a2, $a2, 4095
+; LA32-NEXT: and $a1, $a1, $a2
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB115_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB115_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_63_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 62, 0
+; LA64-NEXT: beqz $a0, .LBB115_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB115_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 9223372036854775807
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_63_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_63_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: lu12i.w $a2, 524287
+; LA32-NEXT: ori $a2, $a2, 4095
+; LA32-NEXT: and $a1, $a1, $a2
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB116_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB116_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_63_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: bstrpick.d $a0, $a0, 62, 0
+; LA64-NEXT: beqz $a0, .LBB116_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB116_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 9223372036854775807
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_64_1_z_branch_i64(i64 %0) {
+; LA32-LABEL: bit_64_1_z_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB117_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: ret
+; LA32-NEXT: .LBB117_2:
+; LA32-NEXT: b bar
+;
+; LA64-LABEL: bit_64_1_z_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: beqz $a0, .LBB117_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: ret
+; LA64-NEXT: .LBB117_2:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+ %2 = and i64 %0, 18446744073709551615
+ %3 = icmp eq i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
+
+define void @bit_64_1_nz_branch_i64(i64 %0) {
+; LA32-LABEL: bit_64_1_nz_branch_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: beq $a0, $zero, .LBB118_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: b bar
+; LA32-NEXT: .LBB118_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: bit_64_1_nz_branch_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: beqz $a0, .LBB118_2
+; LA64-NEXT: # %bb.1:
+; LA64-NEXT: pcaddu18i $t8, %call36(bar)
+; LA64-NEXT: jr $t8
+; LA64-NEXT: .LBB118_2:
+; LA64-NEXT: ret
+ %2 = and i64 %0, 18446744073709551615
+ %3 = icmp ne i64 %2, 0
+ br i1 %3, label %4, label %5
+
+4:
+ tail call void @bar()
+ br label %5
+
+5:
+ ret void
+}
diff --git a/llvm/test/CodeGen/LoongArch/select-const.ll b/llvm/test/CodeGen/LoongArch/select-const.ll
index e9506b3a83592..00a64b8664801 100644
--- a/llvm/test/CodeGen/LoongArch/select-const.ll
+++ b/llvm/test/CodeGen/LoongArch/select-const.ll
@@ -301,3 +301,28 @@ define i32 @select_ne_10001_10002(i32 signext %a, i32 signext %b) {
%2 = select i1 %1, i32 10001, i32 10002
ret i32 %2
}
+
+define i32 @select_slt_zero_constant1_constant2(i32 signext %x) {
+; LA32-LABEL: select_slt_zero_constant1_constant2:
+; LA32: # %bb.0:
+; LA32-NEXT: move $a1, $a0
+; LA32-NEXT: ori $a0, $zero, 7
+; LA32-NEXT: bltz $a1, .LBB16_2
+; LA32-NEXT: # %bb.1:
+; LA32-NEXT: addi.w $a0, $zero, -3
+; LA32-NEXT: .LBB16_2:
+; LA32-NEXT: ret
+;
+; LA64-LABEL: select_slt_zero_constant1_constant2:
+; LA64: # %bb.0:
+; LA64-NEXT: slti $a0, $a0, 0
+; LA64-NEXT: addi.w $a1, $zero, -3
+; LA64-NEXT: masknez $a1, $a1, $a0
+; LA64-NEXT: ori $a2, $zero, 7
+; LA64-NEXT: maskeqz $a0, $a2, $a0
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: ret
+ %cmp = icmp slt i32 %x, 0
+ %cond = select i1 %cmp, i32 7, i32 -3
+ ret i32 %cond
+}
>From 74072e22008136623b09a1e6b7d82f9fe99e9938 Mon Sep 17 00:00:00 2001
From: WANG Rui <wangrui at loongson.cn>
Date: Sat, 2 Aug 2025 11:43:35 +0800
Subject: [PATCH 4/4] Perform DAG combine for LoongArchISD::SELECT_CC
---
.../LoongArch/LoongArchISelLowering.cpp | 53 +++++++++++++++++++
1 file changed, 53 insertions(+)
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 23ec33213c2cf..56cccb4afd014 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -5191,6 +5191,57 @@ static SDValue performBR_CCCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+static SDValue performSELECT_CCCombine(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const LoongArchSubtarget &Subtarget) {
+  // Try to fold or canonicalize this select_cc into a cheaper form.
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ SDValue CC = N->getOperand(2);
+ ISD::CondCode CCVal = cast<CondCodeSDNode>(CC)->get();
+ SDValue TrueV = N->getOperand(3);
+ SDValue FalseV = N->getOperand(4);
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+
+ // If the True and False values are the same, we don't need a select_cc.
+ if (TrueV == FalseV)
+ return TrueV;
+
+ // (select (x < 0), y, z) -> x >> (GRLEN - 1) & (y - z) + z
+ // (select (x >= 0), y, z) -> x >> (GRLEN - 1) & (z - y) + y
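+  // E.g. with GRLEN == 64 and x < 0, x >> 63 is all ones, so the AND keeps
+  // (y - z) and the ADD produces y; for x >= 0 the shift yields zero and the
+  // result is z.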
+ if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
+ isNullConstant(RHS) &&
+ (CCVal == ISD::CondCode::SETLT || CCVal == ISD::CondCode::SETGE)) {
+ if (CCVal == ISD::CondCode::SETGE)
+ std::swap(TrueV, FalseV);
+
+ int64_t TrueSImm = cast<ConstantSDNode>(TrueV)->getSExtValue();
+ int64_t FalseSImm = cast<ConstantSDNode>(FalseV)->getSExtValue();
+    // Only handle simm12 constants; values outside this range are better
+    // materialized in a register.
+ if (isInt<12>(TrueSImm) && isInt<12>(FalseSImm) &&
+ isInt<12>(TrueSImm - FalseSImm)) {
+ SDValue SRA =
+ DAG.getNode(ISD::SRA, DL, VT, LHS,
+ DAG.getConstant(Subtarget.getGRLen() - 1, DL, VT));
+ SDValue AND =
+ DAG.getNode(ISD::AND, DL, VT, SRA,
+ DAG.getSignedConstant(TrueSImm - FalseSImm, DL, VT));
+ return DAG.getNode(ISD::ADD, DL, VT, AND, FalseV);
+ }
+
+ if (CCVal == ISD::CondCode::SETGE)
+ std::swap(TrueV, FalseV);
+ }
+
+ if (combine_CC(LHS, RHS, CC, DL, DAG, Subtarget))
+ return DAG.getNode(LoongArchISD::SELECT_CC, DL, N->getValueType(0),
+ {LHS, RHS, CC, TrueV, FalseV});
+
+ return SDValue();
+}
+
template <unsigned N>
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp,
SelectionDAG &DAG,
@@ -5885,6 +5936,8 @@ SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
return performBITREV_WCombine(N, DAG, DCI, Subtarget);
case LoongArchISD::BR_CC:
return performBR_CCCombine(N, DAG, DCI, Subtarget);
+ case LoongArchISD::SELECT_CC:
+ return performSELECT_CCCombine(N, DAG, DCI, Subtarget);
case ISD::INTRINSIC_WO_CHAIN:
return performINTRINSIC_WO_CHAINCombine(N, DAG, DCI, Subtarget);
case LoongArchISD::MOVGR2FR_W_LA64:
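
For reference, a minimal standalone sketch of the arithmetic behind the simm12
path above (hypothetical helper, assuming GRLen == 64 and an arithmetic right
shift; not part of the patch):

#include <cassert>
#include <cstdint>

// Branchless (x < 0 ? y : z), mirroring the combine:
// x >> (GRLen - 1) selects (y - z) when x is negative, then z is added back.
static int64_t selectLtZero(int64_t x, int64_t y, int64_t z) {
  int64_t Mask = x >> 63; // all ones if x < 0, zero otherwise
  return (Mask & (y - z)) + z;
}

int main() {
  assert(selectLtZero(-5, 7, -3) == 7);  // negative x picks the "true" value
  assert(selectLtZero(4, 7, -3) == -3);  // non-negative x picks the "false" value
  return 0;
}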