[llvm] f064a99 - [DAGCombine] Optimize away cond ? 1 : 0 post-legalization (#186771)

via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 20 08:23:23 PDT 2026


Author: Nikita Popov
Date: 2026-03-20T16:23:18+01:00
New Revision: f064a9979ff5ddf69af66f524748def5ed852440

URL: https://github.com/llvm/llvm-project/commit/f064a9979ff5ddf69af66f524748def5ed852440
DIFF: https://github.com/llvm/llvm-project/commit/f064a9979ff5ddf69af66f524748def5ed852440.diff

LOG: [DAGCombine] Optimize away cond ? 1 : 0 post-legalization (#186771)

Selects of the form `cond ? 1 : 0` are created during unrolling of
setcc+vselect. Currently these are not optimized away post-legalization
even if fully redundant. Having these extra selects sitting between
things can prevent other folds from applying.

Enabling this requires some mitigations in the ARM backend, in
particular in the interaction with MVE support. There are two changes
here:

* Form CSINV/CSNEG/CSINC from CMOV, rather than only creating it during
SELECT_CC lowering. (After this change, the lowering in SELECT_CC can be
dropped without test changes, let me know if I should do that.)
* Support pushing negations through CMOV in more cases, in particular if
the operands are constant or the negation can be handled by flipping
lshr/ashr.

Additionally, in the X86 backend, try to simplify CMOV to SETCC if only the
low bit is demanded.

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/ARM/select_const.ll
    llvm/test/CodeGen/Thumb2/mve-pred-and.ll
    llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll
    llvm/test/CodeGen/Thumb2/mve-pred-xor.ll
    llvm/test/CodeGen/Thumb2/mve-vselect-constants.ll
    llvm/test/CodeGen/VE/Vector/vec_divrem.ll
    llvm/test/CodeGen/WebAssembly/vector-extract-last-active.ll
    llvm/test/CodeGen/X86/vec-strict-cmp-128-fp16.ll
    llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll
    llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 36ebcccef7e61..82f8fd572bf19 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12442,7 +12442,6 @@ SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {
     return SDValue();
 
   if (CondVT != MVT::i1 || LegalOperations) {
-    // fold (select Cond, 0, 1) -> (xor Cond, 1)
    // We can't do this reliably if integer based booleans have different contents
     // to floating point based booleans. This is because we can't tell whether we
     // have an integer-based boolean or a floating-point-based boolean unless we
@@ -12455,13 +12454,19 @@ SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {
         TLI.getBooleanContents(/*isVec*/false, /*isFloat*/true) ==
             TargetLowering::ZeroOrOneBooleanContent &&
         TLI.getBooleanContents(/*isVec*/false, /*isFloat*/false) ==
-            TargetLowering::ZeroOrOneBooleanContent &&
-        C1->isZero() && C2->isOne()) {
-      SDValue NotCond =
-          DAG.getNode(ISD::XOR, DL, CondVT, Cond, DAG.getConstant(1, DL, CondVT));
-      if (VT.bitsEq(CondVT))
-        return NotCond;
-      return DAG.getZExtOrTrunc(NotCond, DL, VT);
+            TargetLowering::ZeroOrOneBooleanContent) {
+      // fold (select Cond, 0, 1) -> (xor Cond, 1)
+      if (C1->isZero() && C2->isOne()) {
+        SDValue NotCond = DAG.getNode(ISD::XOR, DL, CondVT, Cond,
+                                      DAG.getConstant(1, DL, CondVT));
+        if (VT.bitsEq(CondVT))
+          return NotCond;
+        return DAG.getZExtOrTrunc(NotCond, DL, VT);
+      }
+
+      // fold (select Cond, 1, 0) -> Cond
+      if (C1->isOne() && C2->isZero() && CondVT == VT)
+        return Cond;
     }
 
     return SDValue();

diff  --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 970c962197ac0..0aa9f4c89b40a 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13937,8 +13937,25 @@ static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG) {
                      CSINC.getOperand(3));
 }
 
-static bool isNegatedInteger(SDValue Op) {
-  return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0));
+static int getNegationCost(SDValue Op) {
+  // Free to negate.
+  if (isa<ConstantSDNode>(Op))
+    return 0;
+
+  // Will save one instruction.
+  if (Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)))
+    return -1;
+
+  // Can freely negate by converting sra <-> srl.
+  if (Op.getOpcode() == ISD::SRA || Op.getOpcode() == ISD::SRL) {
+    ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+    if (Op.hasOneUse() && ShiftAmt &&
+        ShiftAmt->getZExtValue() == Op.getValueType().getScalarSizeInBits() - 1)
+      return 0;
+  }
+
+  // Will have to create sub.
+  return 1;
 }
 
 // Try to fold
@@ -13948,7 +13965,8 @@ static bool isNegatedInteger(SDValue Op) {
 // The folding helps cmov to be matched with csneg without generating
 // redundant neg instruction.
 static SDValue performNegCMovCombine(SDNode *N, SelectionDAG &DAG) {
-  if (!isNegatedInteger(SDValue(N, 0)))
+  assert(N->getOpcode() == ISD::SUB);
+  if (!isNullConstant(N->getOperand(0)))
     return SDValue();
 
   SDValue CMov = N->getOperand(1);
@@ -13958,9 +13976,8 @@ static SDValue performNegCMovCombine(SDNode *N, SelectionDAG &DAG) {
   SDValue N0 = CMov.getOperand(0);
   SDValue N1 = CMov.getOperand(1);
 
-  // If neither of them are negations, it's not worth the folding as it
-  // introduces two additional negations while reducing one negation.
-  if (!isNegatedInteger(N0) && !isNegatedInteger(N1))
+  // Only perform the fold if we actually save something.
+  if (getNegationCost(N0) + getNegationCost(N1) > 0)
     return SDValue();
 
   SDLoc DL(N);
@@ -18358,18 +18375,33 @@ ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
 SDValue
 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
+  SDLoc dl(N);
+  EVT VT = N->getValueType(0);
+  SDValue FalseVal = N->getOperand(0);
+  SDValue TrueVal = N->getOperand(1);
+  SDValue ARMcc = N->getOperand(2);
   SDValue Cmp = N->getOperand(3);
+
+  // Try to form CSINV etc.
+  unsigned Opcode;
+  bool InvertCond;
+  if (SDValue CSetOp =
+          matchCSET(Opcode, InvertCond, TrueVal, FalseVal, Subtarget)) {
+    if (InvertCond) {
+      ARMCC::CondCodes CondCode =
+          (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
+      CondCode = ARMCC::getOppositeCondition(CondCode);
+      ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
+    }
+    return DAG.getNode(Opcode, dl, VT, CSetOp, CSetOp, ARMcc, Cmp);
+  }
+
   if (Cmp.getOpcode() != ARMISD::CMPZ)
     // Only looking at EQ and NE cases.
     return SDValue();
 
-  EVT VT = N->getValueType(0);
-  SDLoc dl(N);
   SDValue LHS = Cmp.getOperand(0);
   SDValue RHS = Cmp.getOperand(1);
-  SDValue FalseVal = N->getOperand(0);
-  SDValue TrueVal = N->getOperand(1);
-  SDValue ARMcc = N->getOperand(2);
   ARMCC::CondCodes CC = (ARMCC::CondCodes)ARMcc->getAsZExtVal();
 
   // BFI is only available on V6T2+.

diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 1d3278b23b913..27d4b8055d83e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -45506,6 +45506,22 @@ bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
                              OriginalDemandedElts, Known, TLO, Depth + 1))
       return true;
 
+    // Check whether we can fold away a cond ? 0 : 1 cmov after shrinking
+    // operands based on demanded bits.
+    KnownBits KnownFalseVal =
+        Known & KnownBits::makeConstant(OriginalDemandedBits);
+    KnownBits KnownTrueVal =
+        Known2 & KnownBits::makeConstant(OriginalDemandedBits);
+    if (KnownFalseVal.isZero() && KnownTrueVal.isConstant() &&
+        KnownTrueVal.getConstant().isOne() &&
+        Op.getValueType().isScalarInteger()) {
+      SDLoc DL(Op);
+      X86::CondCode CC = (X86::CondCode)Op.getConstantOperandVal(2);
+      SDValue SetCC = getSETCC(CC, Op.getOperand(3), DL, TLO.DAG);
+      return TLO.CombineTo(
+          Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, DL, Op.getValueType(), SetCC));
+    }
+
     // Only known if known in both the LHS and RHS.
     Known = Known.intersectWith(Known2);
     return false;

diff  --git a/llvm/test/CodeGen/ARM/select_const.ll b/llvm/test/CodeGen/ARM/select_const.ll
index 180daa12e7c52..77b67cc40f707 100644
--- a/llvm/test/CodeGen/ARM/select_const.ll
+++ b/llvm/test/CodeGen/ARM/select_const.ll
@@ -628,7 +628,6 @@ define i64 @opaque_constant1(i1 %cond, i64 %x) {
 ; ARM-NEXT:    orr lr, lr, #65536
 ; ARM-NEXT:    mvnne r0, #3
 ; ARM-NEXT:    and r4, r0, lr
-; ARM-NEXT:    movne r12, #1
 ; ARM-NEXT:    subs r0, r4, #1
 ; ARM-NEXT:    eor r2, r2, lr
 ; ARM-NEXT:    eor r3, r3, #1
@@ -646,12 +645,10 @@ define i64 @opaque_constant1(i1 %cond, i64 %x) {
 ; THUMB2-NEXT:    ands r12, r0, #1
 ; THUMB2-NEXT:    mov.w lr, #1
 ; THUMB2-NEXT:    it ne
-; THUMB2-NEXT:    movne.w r12, #1
-; THUMB2-NEXT:    it ne
 ; THUMB2-NEXT:    movne.w lr, #65536
 ; THUMB2-NEXT:    subs.w r0, lr, #1
-; THUMB2-NEXT:    eor r3, r3, #1
 ; THUMB2-NEXT:    sbc r1, r12, #0
+; THUMB2-NEXT:    eor r3, r3, #1
 ; THUMB2-NEXT:    eor r2, r2, #65537
 ; THUMB2-NEXT:    orrs r2, r3
 ; THUMB2-NEXT:    itt ne
@@ -663,42 +660,38 @@ define i64 @opaque_constant1(i1 %cond, i64 %x) {
 ; THUMB:       @ %bb.0:
 ; THUMB-NEXT:    .save {r4, r5, r6, r7, lr}
 ; THUMB-NEXT:    push {r4, r5, r6, r7, lr}
-; THUMB-NEXT:    movs r7, #1
-; THUMB-NEXT:    ands r0, r7
-; THUMB-NEXT:    subs r1, r0, #1
-; THUMB-NEXT:    mov r12, r0
-; THUMB-NEXT:    mov r4, r12
-; THUMB-NEXT:    sbcs r4, r1
-; THUMB-NEXT:    cmp r0, #0
+; THUMB-NEXT:    movs r6, #1
+; THUMB-NEXT:    ands r0, r6
 ; THUMB-NEXT:    bne .LBB24_2
 ; THUMB-NEXT:  @ %bb.1:
 ; THUMB-NEXT:    movs r5, #23
 ; THUMB-NEXT:    b .LBB24_3
 ; THUMB-NEXT:  .LBB24_2:
-; THUMB-NEXT:    movs r0, #3
-; THUMB-NEXT:    mvns r5, r0
+; THUMB-NEXT:    movs r1, #3
+; THUMB-NEXT:    mvns r5, r1
 ; THUMB-NEXT:  .LBB24_3:
-; THUMB-NEXT:    ldr r0, .LCPI24_0
-; THUMB-NEXT:    ands r5, r0
-; THUMB-NEXT:    movs r6, #0
-; THUMB-NEXT:    subs r0, r5, #1
-; THUMB-NEXT:    mov r12, r4
+; THUMB-NEXT:    ldr r1, .LCPI24_0
+; THUMB-NEXT:    ands r5, r1
+; THUMB-NEXT:    movs r4, #0
+; THUMB-NEXT:    subs r7, r5, #1
+; THUMB-NEXT:    mov r12, r0
 ; THUMB-NEXT:    mov r1, r12
-; THUMB-NEXT:    sbcs r1, r6
-; THUMB-NEXT:    eors r3, r7
-; THUMB-NEXT:    ldr r6, .LCPI24_0
-; THUMB-NEXT:    eors r2, r6
+; THUMB-NEXT:    sbcs r1, r4
+; THUMB-NEXT:    eors r3, r6
+; THUMB-NEXT:    ldr r4, .LCPI24_0
+; THUMB-NEXT:    eors r2, r4
 ; THUMB-NEXT:    orrs r2, r3
 ; THUMB-NEXT:    cmp r2, #0
 ; THUMB-NEXT:    beq .LBB24_5
 ; THUMB-NEXT:  @ %bb.4:
-; THUMB-NEXT:    mov r12, r4
+; THUMB-NEXT:    mov r12, r0
 ; THUMB-NEXT:    mov r1, r12
 ; THUMB-NEXT:  .LBB24_5:
 ; THUMB-NEXT:    beq .LBB24_7
 ; THUMB-NEXT:  @ %bb.6:
-; THUMB-NEXT:    movs r0, r5
+; THUMB-NEXT:    movs r7, r5
 ; THUMB-NEXT:  .LBB24_7:
+; THUMB-NEXT:    movs r0, r7
 ; THUMB-NEXT:    pop {r4, r5, r6, r7}
 ; THUMB-NEXT:    pop {r2}
 ; THUMB-NEXT:    bx r2

diff  --git a/llvm/test/CodeGen/Thumb2/mve-pred-and.ll b/llvm/test/CodeGen/Thumb2/mve-pred-and.ll
index 42eec5e596a96..76e8d34aa3454 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-and.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-and.ll
@@ -607,11 +607,9 @@ define arm_aapcs_vfpcc <2 x i64> @cmpeq_v2i1(<2 x i64> %a, <2 x i64> %b, <2 x i6
 ; CHECK-NEXT:    vmov r1, r2, d0
 ; CHECK-NEXT:    orrs r1, r2
 ; CHECK-NEXT:    vmov r12, r2, d5
-; CHECK-NEXT:    cset r1, eq
-; CHECK-NEXT:    cmp r0, #0
-; CHECK-NEXT:    csel r0, zr, r1, ne
-; CHECK-NEXT:    movs r1, #0
-; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    mov.w r1, #0
+; CHECK-NEXT:    csetm r0, eq
 ; CHECK-NEXT:    bfi r1, r0, #0, #8
 ; CHECK-NEXT:    vmov r3, r0, d3
 ; CHECK-NEXT:    eors r0, r2
@@ -619,10 +617,8 @@ define arm_aapcs_vfpcc <2 x i64> @cmpeq_v2i1(<2 x i64> %a, <2 x i64> %b, <2 x i6
 ; CHECK-NEXT:    orrs r0, r2
 ; CHECK-NEXT:    vmov r2, r3, d1
 ; CHECK-NEXT:    orrs r2, r3
-; CHECK-NEXT:    cset r2, eq
-; CHECK-NEXT:    cmp r0, #0
-; CHECK-NEXT:    csel r0, zr, r2, ne
-; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    orrs r0, r2
+; CHECK-NEXT:    csetm r0, eq
 ; CHECK-NEXT:    bfi r1, r0, #8, #8
 ; CHECK-NEXT:    vmsr p0, r1
 ; CHECK-NEXT:    vpsel q0, q0, q1
@@ -644,11 +640,9 @@ define arm_aapcs_vfpcc <2 x i64> @cmpeqr_v2i1(<2 x i64> %a, <2 x i64> %b, i64 %c
 ; CHECK-NEXT:    orr.w r12, r2, r3
 ; CHECK-NEXT:    vmov r3, r2, d0
 ; CHECK-NEXT:    orrs r2, r3
-; CHECK-NEXT:    mov.w r3, #0
-; CHECK-NEXT:    cset r2, eq
-; CHECK-NEXT:    cmp.w r12, #0
-; CHECK-NEXT:    csel r2, zr, r2, ne
-; CHECK-NEXT:    rsbs r2, r2, #0
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    orrs.w r2, r2, r12
+; CHECK-NEXT:    csetm r2, eq
 ; CHECK-NEXT:    bfi r3, r2, #0, #8
 ; CHECK-NEXT:    vmov r12, r2, d3
 ; CHECK-NEXT:    eors r1, r2
@@ -656,10 +650,8 @@ define arm_aapcs_vfpcc <2 x i64> @cmpeqr_v2i1(<2 x i64> %a, <2 x i64> %b, i64 %c
 ; CHECK-NEXT:    orrs r0, r1
 ; CHECK-NEXT:    vmov r1, r2, d1
 ; CHECK-NEXT:    orrs r1, r2
-; CHECK-NEXT:    cset r1, eq
-; CHECK-NEXT:    cmp r0, #0
-; CHECK-NEXT:    csel r0, zr, r1, ne
-; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    orrs r0, r1
+; CHECK-NEXT:    csetm r0, eq
 ; CHECK-NEXT:    bfi r3, r0, #8, #8
 ; CHECK-NEXT:    vmsr p0, r3
 ; CHECK-NEXT:    vpsel q0, q0, q1

diff  --git a/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll b/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll
index c9a51f0507844..ba3d327e00594 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll
@@ -280,21 +280,23 @@ entry:
 define arm_aapcs_vfpcc <2 x i64> @cmpsltz_v2i1(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
 ; CHECK-LABEL: cmpsltz_v2i1:
 ; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov r2, s9
+; CHECK-NEXT:    vmov r1, s7
 ; CHECK-NEXT:    movs r3, #0
-; CHECK-NEXT:    vmov r0, s1
-; CHECK-NEXT:    vmov r1, s5
+; CHECK-NEXT:    vmov r2, s11
+; CHECK-NEXT:    vmov r0, s3
+; CHECK-NEXT:    vmov r12, s1
+; CHECK-NEXT:    asrs r1, r1, #31
 ; CHECK-NEXT:    cmp.w r3, r2, lsr #31
-; CHECK-NEXT:    vmov r2, s7
-; CHECK-NEXT:    csel r0, r0, r1, ne
-; CHECK-NEXT:    vmov r1, s3
-; CHECK-NEXT:    asr.w r12, r0, #31
-; CHECK-NEXT:    vmov r0, s11
+; CHECK-NEXT:    vmov r2, s5
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    asrne r1, r0, #31
+; CHECK-NEXT:    vmov r0, s9
+; CHECK-NEXT:    asrs r2, r2, #31
 ; CHECK-NEXT:    cmp.w r3, r0, lsr #31
-; CHECK-NEXT:    bfi r3, r12, #0, #8
-; CHECK-NEXT:    csel r0, r1, r2, ne
-; CHECK-NEXT:    asrs r0, r0, #31
-; CHECK-NEXT:    bfi r3, r0, #8, #8
+; CHECK-NEXT:    it ne
+; CHECK-NEXT:    asrne.w r2, r12, #31
+; CHECK-NEXT:    bfi r3, r2, #0, #8
+; CHECK-NEXT:    bfi r3, r1, #8, #8
 ; CHECK-NEXT:    vmsr p0, r3
 ; CHECK-NEXT:    vpsel q0, q0, q1
 ; CHECK-NEXT:    bx lr

diff  --git a/llvm/test/CodeGen/Thumb2/mve-pred-xor.ll b/llvm/test/CodeGen/Thumb2/mve-pred-xor.ll
index f4a0d5120305a..93b1484d902e3 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pred-xor.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-xor.ll
@@ -459,22 +459,22 @@ define arm_aapcs_vfpcc <2 x i64> @cmpeqz_v2i1(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-LABEL: cmpeqz_v2i1:
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    vmov r0, r1, d2
+; CHECK-NEXT:    vmov r2, r3, d0
 ; CHECK-NEXT:    orrs r0, r1
-; CHECK-NEXT:    vmov r1, r2, d0
-; CHECK-NEXT:    cset r0, eq
-; CHECK-NEXT:    orrs r1, r2
-; CHECK-NEXT:    it eq
-; CHECK-NEXT:    eoreq r0, r0, #1
-; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    cset r0, ne
+; CHECK-NEXT:    cset r1, eq
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    csel r0, r0, r1, eq
 ; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    rsbs r0, r0, #0
 ; CHECK-NEXT:    bfi r1, r0, #0, #8
 ; CHECK-NEXT:    vmov r0, r2, d3
 ; CHECK-NEXT:    orrs r0, r2
-; CHECK-NEXT:    vmov r2, r3, d1
-; CHECK-NEXT:    cset r0, eq
-; CHECK-NEXT:    orrs r2, r3
-; CHECK-NEXT:    it eq
-; CHECK-NEXT:    eoreq r0, r0, #1
+; CHECK-NEXT:    vmov r3, r0, d1
+; CHECK-NEXT:    cset r12, ne
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    orrs r0, r3
+; CHECK-NEXT:    csel r0, r12, r2, eq
 ; CHECK-NEXT:    rsbs r0, r0, #0
 ; CHECK-NEXT:    bfi r1, r0, #8, #8
 ; CHECK-NEXT:    vmsr p0, r1
@@ -495,25 +495,25 @@ define arm_aapcs_vfpcc <2 x i64> @cmpeq_v2i1(<2 x i64> %a, <2 x i64> %b, <2 x i6
 ; CHECK-NEXT:    vmov r2, r3, d2
 ; CHECK-NEXT:    eors r1, r3
 ; CHECK-NEXT:    eors r0, r2
+; CHECK-NEXT:    vmov r2, r3, d0
 ; CHECK-NEXT:    orrs r0, r1
-; CHECK-NEXT:    vmov r1, r2, d0
-; CHECK-NEXT:    cset r0, eq
-; CHECK-NEXT:    orrs r1, r2
-; CHECK-NEXT:    it eq
-; CHECK-NEXT:    eoreq r0, r0, #1
-; CHECK-NEXT:    rsbs r0, r0, #0
+; CHECK-NEXT:    cset r0, ne
+; CHECK-NEXT:    cset r1, eq
+; CHECK-NEXT:    orrs r2, r3
+; CHECK-NEXT:    csel r0, r0, r1, eq
 ; CHECK-NEXT:    movs r1, #0
-; CHECK-NEXT:    bfi r1, r0, #0, #8
+; CHECK-NEXT:    rsbs r0, r0, #0
 ; CHECK-NEXT:    vmov r12, r2, d5
+; CHECK-NEXT:    bfi r1, r0, #0, #8
 ; CHECK-NEXT:    vmov r3, r0, d3
 ; CHECK-NEXT:    eors r0, r2
 ; CHECK-NEXT:    eor.w r2, r3, r12
 ; CHECK-NEXT:    orrs r0, r2
-; CHECK-NEXT:    vmov r2, r3, d1
-; CHECK-NEXT:    cset r0, eq
-; CHECK-NEXT:    orrs r2, r3
-; CHECK-NEXT:    it eq
-; CHECK-NEXT:    eoreq r0, r0, #1
+; CHECK-NEXT:    vmov r3, r0, d1
+; CHECK-NEXT:    cset r12, ne
+; CHECK-NEXT:    cset r2, eq
+; CHECK-NEXT:    orrs r0, r3
+; CHECK-NEXT:    csel r0, r12, r2, eq
 ; CHECK-NEXT:    rsbs r0, r0, #0
 ; CHECK-NEXT:    bfi r1, r0, #8, #8
 ; CHECK-NEXT:    vmsr p0, r1

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vselect-constants.ll b/llvm/test/CodeGen/Thumb2/mve-vselect-constants.ll
index 024de2b36667b..726237eb27f2d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vselect-constants.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vselect-constants.ll
@@ -282,12 +282,12 @@ define arm_aapcs_vfpcc <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %a, <2 x i64>
 ; CHECK-NEXT:    vmov r1, s1
 ; CHECK-NEXT:    movs r0, #0
 ; CHECK-NEXT:    vmov.i32 q2, #0x0
-; CHECK-NEXT:    mvns r1, r1
-; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    cmp.w r1, #-1
+; CHECK-NEXT:    csetm r1, gt
 ; CHECK-NEXT:    bfi r0, r1, #0, #8
 ; CHECK-NEXT:    vmov r1, s3
-; CHECK-NEXT:    mvns r1, r1
-; CHECK-NEXT:    asrs r1, r1, #31
+; CHECK-NEXT:    cmp.w r1, #-1
+; CHECK-NEXT:    csetm r1, gt
 ; CHECK-NEXT:    bfi r0, r1, #8, #8
 ; CHECK-NEXT:    vmsr p0, r0
 ; CHECK-NEXT:    vpsel q0, q1, q2

diff  --git a/llvm/test/CodeGen/VE/Vector/vec_divrem.ll b/llvm/test/CodeGen/VE/Vector/vec_divrem.ll
index 93e2889793ba5..a539fea168cc5 100644
--- a/llvm/test/CodeGen/VE/Vector/vec_divrem.ll
+++ b/llvm/test/CodeGen/VE/Vector/vec_divrem.ll
@@ -10,18 +10,19 @@ define <4 x i8> @udiv_by_minus_one(<4 x i8> %x) {
 ; CHECK-NEXT:    and %s4, %s0, (56)0
 ; CHECK-NEXT:    and %s1, %s1, (56)0
 ; CHECK-NEXT:    and %s2, %s2, (56)0
-; CHECK-NEXT:    and %s3, %s3, (56)0
+; CHECK-NEXT:    and %s0, %s3, (56)0
+; CHECK-NEXT:    lea %s5, 255
+; CHECK-NEXT:    cmps.w.sx %s6, %s0, %s5
 ; CHECK-NEXT:    or %s0, 0, (0)1
-; CHECK-NEXT:    cmpu.w %s5, %s3, (56)0
 ; CHECK-NEXT:    or %s3, 0, (0)1
-; CHECK-NEXT:    cmov.w.eq %s3, (63)0, %s5
-; CHECK-NEXT:    cmpu.w %s5, %s2, (56)0
+; CHECK-NEXT:    cmov.w.eq %s3, (63)0, %s6
+; CHECK-NEXT:    cmps.w.sx %s6, %s2, %s5
 ; CHECK-NEXT:    or %s2, 0, (0)1
-; CHECK-NEXT:    cmov.w.eq %s2, (63)0, %s5
-; CHECK-NEXT:    cmpu.w %s5, %s1, (56)0
+; CHECK-NEXT:    cmov.w.eq %s2, (63)0, %s6
+; CHECK-NEXT:    cmps.w.sx %s6, %s1, %s5
 ; CHECK-NEXT:    or %s1, 0, (0)1
-; CHECK-NEXT:    cmov.w.eq %s1, (63)0, %s5
-; CHECK-NEXT:    cmpu.w %s4, %s4, (56)0
+; CHECK-NEXT:    cmov.w.eq %s1, (63)0, %s6
+; CHECK-NEXT:    cmps.w.sx %s4, %s4, %s5
 ; CHECK-NEXT:    cmov.w.eq %s0, (63)0, %s4
 ; CHECK-NEXT:    b.l.t (, %s10)
   %r = udiv <4 x i8> %x, <i8 255, i8 255, i8 255, i8 255>

diff  --git a/llvm/test/CodeGen/WebAssembly/vector-extract-last-active.ll b/llvm/test/CodeGen/WebAssembly/vector-extract-last-active.ll
index ba1af61433cb3..927424dc649d5 100644
--- a/llvm/test/CodeGen/WebAssembly/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/WebAssembly/vector-extract-last-active.ll
@@ -24,18 +24,14 @@ define i32 @extract_last_active_v4i32(<4 x i32> %a, <4 x i1> %c) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.store 0
 ; CHECK-NEXT:    local.get 8
-; CHECK-NEXT:    i32.const 3
-; CHECK-NEXT:    i32.const 2
 ; CHECK-NEXT:    local.get 7
 ; CHECK-NEXT:    i32.const 1
 ; CHECK-NEXT:    i32.and
-; CHECK-NEXT:    i32.select
-; CHECK-NEXT:    i32.const 1
-; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i32.const 2
+; CHECK-NEXT:    i32.or
 ; CHECK-NEXT:    local.get 5
 ; CHECK-NEXT:    i32.const 1
 ; CHECK-NEXT:    i32.and
-; CHECK-NEXT:    i32.select
 ; CHECK-NEXT:    local.get 6
 ; CHECK-NEXT:    local.get 7
 ; CHECK-NEXT:    i32.or
@@ -83,18 +79,14 @@ define i32 @extract_last_active_v4i32_no_default(<4 x i32> %a, <4 x i1> %c) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.store 0
 ; CHECK-NEXT:    local.get 8
-; CHECK-NEXT:    i32.const 3
-; CHECK-NEXT:    i32.const 2
 ; CHECK-NEXT:    local.get 7
 ; CHECK-NEXT:    i32.const 1
 ; CHECK-NEXT:    i32.and
-; CHECK-NEXT:    i32.select
-; CHECK-NEXT:    i32.const 1
-; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i32.const 2
+; CHECK-NEXT:    i32.or
 ; CHECK-NEXT:    local.get 5
 ; CHECK-NEXT:    i32.const 1
 ; CHECK-NEXT:    i32.and
-; CHECK-NEXT:    i32.select
 ; CHECK-NEXT:    local.get 6
 ; CHECK-NEXT:    local.get 7
 ; CHECK-NEXT:    i32.or

diff  --git a/llvm/test/CodeGen/X86/vec-strict-cmp-128-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-128-fp16.ll
index 8c64dd2d9b49f..b14b5b118f280 100644
--- a/llvm/test/CodeGen/X86/vec-strict-cmp-128-fp16.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-cmp-128-fp16.ll
@@ -712,17 +712,15 @@ define <2 x i16> @test_v2f16_oeq_q(<2 x i16> %a, <2 x i16> %b, <2 x half> %f1, <
 ; X86-NEXT:    vucomish 8(%ebp), %xmm2
 ; X86-NEXT:    setnp %al
 ; X86-NEXT:    sete %cl
-; X86-NEXT:    testb %al, %cl
-; X86-NEXT:    setne %al
-; X86-NEXT:    andl $1, %eax
-; X86-NEXT:    kmovw %eax, %k0
+; X86-NEXT:    andb %al, %cl
+; X86-NEXT:    andl $1, %ecx
+; X86-NEXT:    kmovw %ecx, %k0
 ; X86-NEXT:    vpsrld $16, %xmm2, %xmm2
 ; X86-NEXT:    vucomish 10(%ebp), %xmm2
 ; X86-NEXT:    setnp %al
 ; X86-NEXT:    sete %cl
-; X86-NEXT:    testb %al, %cl
-; X86-NEXT:    setne %al
-; X86-NEXT:    kmovd %eax, %k1
+; X86-NEXT:    andb %al, %cl
+; X86-NEXT:    kmovd %ecx, %k1
 ; X86-NEXT:    kshiftlw $15, %k1, %k1
 ; X86-NEXT:    kshiftrw $14, %k1, %k1
 ; X86-NEXT:    korw %k1, %k0, %k1
@@ -736,18 +734,16 @@ define <2 x i16> @test_v2f16_oeq_q(<2 x i16> %a, <2 x i16> %b, <2 x half> %f1, <
 ; X64-NEXT:    vucomish %xmm3, %xmm2
 ; X64-NEXT:    setnp %al
 ; X64-NEXT:    sete %cl
-; X64-NEXT:    testb %al, %cl
-; X64-NEXT:    setne %al
-; X64-NEXT:    andl $1, %eax
-; X64-NEXT:    kmovw %eax, %k0
+; X64-NEXT:    andb %al, %cl
+; X64-NEXT:    andl $1, %ecx
+; X64-NEXT:    kmovw %ecx, %k0
 ; X64-NEXT:    vpsrld $16, %xmm3, %xmm3
 ; X64-NEXT:    vpsrld $16, %xmm2, %xmm2
 ; X64-NEXT:    vucomish %xmm3, %xmm2
 ; X64-NEXT:    setnp %al
 ; X64-NEXT:    sete %cl
-; X64-NEXT:    testb %al, %cl
-; X64-NEXT:    setne %al
-; X64-NEXT:    kmovd %eax, %k1
+; X64-NEXT:    andb %al, %cl
+; X64-NEXT:    kmovd %ecx, %k1
 ; X64-NEXT:    kshiftlw $15, %k1, %k1
 ; X64-NEXT:    kshiftrw $14, %k1, %k1
 ; X64-NEXT:    korw %k1, %k0, %k1

diff  --git a/llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll
index 1e56ddc0c8ec8..2eeee032c1c6e 100644
--- a/llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll
@@ -283,17 +283,15 @@ define <2 x i32> @test_v2f32_oeq_q(<2 x i32> %a, <2 x i32> %b, <2 x float> %f1,
 ; AVX512-32-NEXT:    vucomiss 8(%ebp), %xmm2
 ; AVX512-32-NEXT:    setnp %al
 ; AVX512-32-NEXT:    sete %cl
-; AVX512-32-NEXT:    testb %al, %cl
-; AVX512-32-NEXT:    setne %al
-; AVX512-32-NEXT:    andl $1, %eax
-; AVX512-32-NEXT:    kmovw %eax, %k0
+; AVX512-32-NEXT:    andb %al, %cl
+; AVX512-32-NEXT:    andl $1, %ecx
+; AVX512-32-NEXT:    kmovw %ecx, %k0
 ; AVX512-32-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX512-32-NEXT:    vucomiss 12(%ebp), %xmm2
 ; AVX512-32-NEXT:    setnp %al
 ; AVX512-32-NEXT:    sete %cl
-; AVX512-32-NEXT:    testb %al, %cl
-; AVX512-32-NEXT:    setne %al
-; AVX512-32-NEXT:    kmovw %eax, %k1
+; AVX512-32-NEXT:    andb %al, %cl
+; AVX512-32-NEXT:    kmovw %ecx, %k1
 ; AVX512-32-NEXT:    kshiftlw $15, %k1, %k1
 ; AVX512-32-NEXT:    kshiftrw $14, %k1, %k1
 ; AVX512-32-NEXT:    korw %k1, %k0, %k1
@@ -307,18 +305,16 @@ define <2 x i32> @test_v2f32_oeq_q(<2 x i32> %a, <2 x i32> %b, <2 x float> %f1,
 ; AVX512-64-NEXT:    vucomiss %xmm3, %xmm2
 ; AVX512-64-NEXT:    setnp %al
 ; AVX512-64-NEXT:    sete %cl
-; AVX512-64-NEXT:    testb %al, %cl
-; AVX512-64-NEXT:    setne %al
-; AVX512-64-NEXT:    andl $1, %eax
-; AVX512-64-NEXT:    kmovw %eax, %k0
+; AVX512-64-NEXT:    andb %al, %cl
+; AVX512-64-NEXT:    andl $1, %ecx
+; AVX512-64-NEXT:    kmovw %ecx, %k0
 ; AVX512-64-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
 ; AVX512-64-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX512-64-NEXT:    vucomiss %xmm3, %xmm2
 ; AVX512-64-NEXT:    setnp %al
 ; AVX512-64-NEXT:    sete %cl
-; AVX512-64-NEXT:    testb %al, %cl
-; AVX512-64-NEXT:    setne %al
-; AVX512-64-NEXT:    kmovw %eax, %k1
+; AVX512-64-NEXT:    andb %al, %cl
+; AVX512-64-NEXT:    kmovw %ecx, %k1
 ; AVX512-64-NEXT:    kshiftlw $15, %k1, %k1
 ; AVX512-64-NEXT:    kshiftrw $14, %k1, %k1
 ; AVX512-64-NEXT:    korw %k1, %k0, %k1
@@ -336,17 +332,15 @@ define <2 x i32> @test_v2f32_oeq_q(<2 x i32> %a, <2 x i32> %b, <2 x float> %f1,
 ; AVX512F-32-NEXT:    vucomiss 8(%ebp), %xmm2
 ; AVX512F-32-NEXT:    setnp %al
 ; AVX512F-32-NEXT:    sete %cl
-; AVX512F-32-NEXT:    testb %al, %cl
-; AVX512F-32-NEXT:    setne %al
-; AVX512F-32-NEXT:    andl $1, %eax
-; AVX512F-32-NEXT:    kmovw %eax, %k0
+; AVX512F-32-NEXT:    andb %al, %cl
+; AVX512F-32-NEXT:    andl $1, %ecx
+; AVX512F-32-NEXT:    kmovw %ecx, %k0
 ; AVX512F-32-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX512F-32-NEXT:    vucomiss 12(%ebp), %xmm2
 ; AVX512F-32-NEXT:    setnp %al
 ; AVX512F-32-NEXT:    sete %cl
-; AVX512F-32-NEXT:    testb %al, %cl
-; AVX512F-32-NEXT:    setne %al
-; AVX512F-32-NEXT:    kmovw %eax, %k1
+; AVX512F-32-NEXT:    andb %al, %cl
+; AVX512F-32-NEXT:    kmovw %ecx, %k1
 ; AVX512F-32-NEXT:    kshiftlw $15, %k1, %k1
 ; AVX512F-32-NEXT:    kshiftrw $14, %k1, %k1
 ; AVX512F-32-NEXT:    korw %k1, %k0, %k1
@@ -364,18 +358,16 @@ define <2 x i32> @test_v2f32_oeq_q(<2 x i32> %a, <2 x i32> %b, <2 x float> %f1,
 ; AVX512F-64-NEXT:    vucomiss %xmm3, %xmm2
 ; AVX512F-64-NEXT:    setnp %al
 ; AVX512F-64-NEXT:    sete %cl
-; AVX512F-64-NEXT:    testb %al, %cl
-; AVX512F-64-NEXT:    setne %al
-; AVX512F-64-NEXT:    andl $1, %eax
-; AVX512F-64-NEXT:    kmovw %eax, %k0
+; AVX512F-64-NEXT:    andb %al, %cl
+; AVX512F-64-NEXT:    andl $1, %ecx
+; AVX512F-64-NEXT:    kmovw %ecx, %k0
 ; AVX512F-64-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
 ; AVX512F-64-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
 ; AVX512F-64-NEXT:    vucomiss %xmm3, %xmm2
 ; AVX512F-64-NEXT:    setnp %al
 ; AVX512F-64-NEXT:    sete %cl
-; AVX512F-64-NEXT:    testb %al, %cl
-; AVX512F-64-NEXT:    setne %al
-; AVX512F-64-NEXT:    kmovw %eax, %k1
+; AVX512F-64-NEXT:    andb %al, %cl
+; AVX512F-64-NEXT:    kmovw %ecx, %k1
 ; AVX512F-64-NEXT:    kshiftlw $15, %k1, %k1
 ; AVX512F-64-NEXT:    kshiftrw $14, %k1, %k1
 ; AVX512F-64-NEXT:    korw %k1, %k0, %k1

diff  --git a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
index 465988760d44a..d0a178dcc08b8 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
@@ -414,9 +414,8 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
 ; AVX512F-NEXT:    vpsrld $16, %xmm0, %xmm1
 ; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm2
 ; AVX512F-NEXT:    vcvtph2ps %xmm1, %xmm3
-; AVX512F-NEXT:    xorl %eax, %eax
 ; AVX512F-NEXT:    vucomiss %xmm3, %xmm2
-; AVX512F-NEXT:    sbbl %eax, %eax
+; AVX512F-NEXT:    setb %al
 ; AVX512F-NEXT:    kmovd %eax, %k1
 ; AVX512F-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
 ; AVX512F-NEXT:    vmovdqa %xmm1, %xmm0


        


More information about the llvm-commits mailing list