[llvm] 9391d46 - [SelectionDAG] Eliminate redundant setcc on comparison results (#171431)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 16 08:45:23 PST 2026


Author: Valeriy Savchenko
Date: 2026-01-16T16:45:19Z
New Revision: 9391d463891d3a777a8924749401530143399a0f

URL: https://github.com/llvm/llvm-project/commit/9391d463891d3a777a8924749401530143399a0f
DIFF: https://github.com/llvm/llvm-project/commit/9391d463891d3a777a8924749401530143399a0f.diff

LOG: [SelectionDAG] Eliminate redundant setcc on comparison results (#171431)

When comparisons produce all-zeros or all-ones (in scalars, or per lane in
vectors), comparing the result of such a comparison against 0 is an identity
operation. This change eliminates the redundant comparison instruction that
follows another comparison operation.

Added: 
    llvm/test/CodeGen/AArch64/setcc-redundant-cmlt.ll
    llvm/test/CodeGen/M68k/setcc-redundant.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 6a0f5c8a2467b..0ec783f1f1d04 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -4745,6 +4745,21 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
     }
   }
 
+  // setcc X, 0, setlt --> X  (when X is all sign bits)
+  // setcc X, 0, setne --> X  (when X is all sign bits)
+  //
+  // When we know that X has 0 or -1 in each element (or scalar), this
+  // comparison will produce X. This is only true when boolean contents are
+  // represented via 0s and -1s.
+  if (VT == OpVT &&
+      // Check that the result of setcc is 0 or -1.
+      getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent &&
+      // Match only the checks X < 0 and X != 0
+      (Cond == ISD::SETLT || Cond == ISD::SETNE) && isNullOrNullSplat(N1) &&
+      // The identity holds iff we know all sign bits for all lanes.
+      DAG.ComputeNumSignBits(N0) == N0.getScalarValueSizeInBits())
+    return N0;
+
   // FIXME: Support vectors.
   if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
     const APInt &C1 = N1C->getAPIntValue();

diff  --git a/llvm/test/CodeGen/AArch64/setcc-redundant-cmlt.ll b/llvm/test/CodeGen/AArch64/setcc-redundant-cmlt.ll
new file mode 100644
index 0000000000000..fe4f35b9a3527
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/setcc-redundant-cmlt.ll
@@ -0,0 +1,432 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64 < %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel < %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
+
+define <4 x i32> @direct_setcc_lt0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: direct_setcc_lt0:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: direct_setcc_lt0:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    ret
+  %cmp = icmp slt <4 x i32> %a, %b
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %lt0 = icmp slt <4 x i32> %sext, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @shuffle_setcc_lt0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: shuffle_setcc_lt0:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    dup v0.4s, v0.s[2]
+; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: shuffle_setcc_lt0:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    dup v0.4s, v0.s[2]
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    ret
+  %cmp = icmp slt <4 x i32> %a, %b
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %dup = shufflevector <4 x i32> %sext, <4 x i32> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+  %lt0 = icmp slt <4 x i32> %dup, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @direct_setcc_0gt(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: direct_setcc_0gt:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: direct_setcc_0gt:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    ret
+  %cmp = icmp slt <4 x i32> %a, %b
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %gt0 = icmp sgt <4 x i32> zeroinitializer, %sext
+  %sel = select <4 x i1> %gt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <8 x i16> @direct_setcc_lt0_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-SD-LABEL: direct_setcc_lt0_v8i16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v0.8h, v1.8h, v0.8h
+; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: direct_setcc_lt0_v8i16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.8h, v1.8h, v0.8h
+; CHECK-GI-NEXT:    cmlt v0.8h, v0.8h, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    ret
+  %cmp = icmp slt <8 x i16> %a, %b
+  %sext = sext <8 x i1> %cmp to <8 x i16>
+  %lt0 = icmp slt <8 x i16> %sext, zeroinitializer
+  %sel = select <8 x i1> %lt0, <8 x i16> %x, <8 x i16> %y
+  ret <8 x i16> %sel
+}
+
+define <4 x i32> @non_splat_shuffle(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: non_splat_shuffle:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-SD-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: non_splat_shuffle:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    rev64 v0.4s, v0.4s
+; CHECK-GI-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    ret
+  %cmp = icmp slt <4 x i32> %a, %b
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %shuf = shufflevector <4 x i32> %sext, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+  %lt0 = icmp slt <4 x i32> %shuf, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <16 x i8> @bitcast_narrow(<4 x i32> %a, <4 x i32> %b, <16 x i8> %x, <16 x i8> %y) {
+; CHECK-SD-LABEL: bitcast_narrow:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: bitcast_narrow:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmlt v0.16b, v0.16b, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    ret
+  %cmp = icmp slt <4 x i32> %a, %b
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %bc = bitcast <4 x i32> %sext to <16 x i8>
+  %lt0 = icmp slt <16 x i8> %bc, zeroinitializer
+  %sel = select <16 x i1> %lt0, <16 x i8> %x, <16 x i8> %y
+  ret <16 x i8> %sel
+}
+
+define <8 x i16> @chain_shuffle_bitcast(<4 x i32> %a, <4 x i32> %b, <8 x i16> %x, <8 x i16> %y) {
+; CHECK-SD-LABEL: chain_shuffle_bitcast:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    dup v0.4s, v0.s[2]
+; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: chain_shuffle_bitcast:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    dup v0.4s, v0.s[2]
+; CHECK-GI-NEXT:    cmlt v0.8h, v0.8h, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    ret
+  %cmp = icmp slt <4 x i32> %a, %b
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %shuf = shufflevector <4 x i32> %sext, <4 x i32> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
+  %bc = bitcast <4 x i32> %shuf to <8 x i16>
+  %lt0 = icmp slt <8 x i16> %bc, zeroinitializer
+  %sel = select <8 x i1> %lt0, <8 x i16> %x, <8 x i16> %y
+  ret <8 x i16> %sel
+}
+
+define <4 x i32> @setcc_ne0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: setcc_ne0:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: setcc_ne0:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmtst v0.4s, v0.4s, v0.4s
+; CHECK-GI-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    ret
+  %cmp = icmp slt <4 x i32> %a, %b
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %ne0 = icmp ne <4 x i32> %sext, zeroinitializer
+  %sel = select <4 x i1> %ne0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @setcc_ugt0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: setcc_ugt0:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: setcc_ugt0:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v4.2d, #0000000000000000
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmhi v0.4s, v0.4s, v4.4s
+; CHECK-GI-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    ret
+  %cmp = icmp slt <4 x i32> %a, %b
+  %sext = sext <4 x i1> %cmp to <4 x i32>
+  %ugt0 = icmp ugt <4 x i32> %sext, zeroinitializer
+  %sel = select <4 x i1> %ugt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+; NEGATIVE TEST: Widening bitcast should NOT be optimized
+define <4 x i32> @bitcast_widen_negative(<16 x i8> %a, <16 x i8> %b, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: bitcast_widen_negative:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmgt v0.16b, v1.16b, v0.16b
+; CHECK-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-NEXT:    bsl v0.16b, v2.16b, v3.16b
+; CHECK-NEXT:    ret
+  %cmp = icmp slt <16 x i8> %a, %b
+  %sext = sext <16 x i1> %cmp to <16 x i8>
+  %bc = bitcast <16 x i8> %sext to <4 x i32>
+  %lt0 = icmp slt <4 x i32> %bc, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @smax_setcc(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: smax_setcc:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v2.4s, v3.4s, v2.4s
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smax_setcc:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmgt v1.4s, v3.4s, v2.4s
+; CHECK-GI-NEXT:    smax v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT:    ret
+  %cmp1 = icmp slt <4 x i32> %a, %b
+  %sext1 = sext <4 x i1> %cmp1 to <4 x i32>
+  %cmp2 = icmp slt <4 x i32> %c, %d
+  %sext2 = sext <4 x i1> %cmp2 to <4 x i32>
+  %smax = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %sext1, <4 x i32> %sext2)
+  %lt0 = icmp slt <4 x i32> %smax, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @smin_setcc(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: smin_setcc:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v2.4s, v3.4s, v2.4s
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smin_setcc:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmgt v1.4s, v3.4s, v2.4s
+; CHECK-GI-NEXT:    smin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT:    ret
+  %cmp1 = icmp slt <4 x i32> %a, %b
+  %sext1 = sext <4 x i1> %cmp1 to <4 x i32>
+  %cmp2 = icmp slt <4 x i32> %c, %d
+  %sext2 = sext <4 x i1> %cmp2 to <4 x i32>
+  %smin = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %sext1, <4 x i32> %sext2)
+  %lt0 = icmp slt <4 x i32> %smin, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @umax_setcc(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: umax_setcc:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v2.4s, v3.4s, v2.4s
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umax_setcc:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmgt v1.4s, v3.4s, v2.4s
+; CHECK-GI-NEXT:    umax v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT:    ret
+  %cmp1 = icmp slt <4 x i32> %a, %b
+  %sext1 = sext <4 x i1> %cmp1 to <4 x i32>
+  %cmp2 = icmp slt <4 x i32> %c, %d
+  %sext2 = sext <4 x i1> %cmp2 to <4 x i32>
+  %umax = call <4 x i32> @llvm.umax.v4i32(<4 x i32> %sext1, <4 x i32> %sext2)
+  %lt0 = icmp slt <4 x i32> %umax, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @umin_setcc(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: umin_setcc:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v2.4s, v3.4s, v2.4s
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umin_setcc:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmgt v1.4s, v3.4s, v2.4s
+; CHECK-GI-NEXT:    umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT:    ret
+  %cmp1 = icmp slt <4 x i32> %a, %b
+  %sext1 = sext <4 x i1> %cmp1 to <4 x i32>
+  %cmp2 = icmp slt <4 x i32> %c, %d
+  %sext2 = sext <4 x i1> %cmp2 to <4 x i32>
+  %umin = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %sext1, <4 x i32> %sext2)
+  %lt0 = icmp slt <4 x i32> %umin, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @select_setcc(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %e, <4 x i32> %f, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: select_setcc:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v2.4s, v3.4s, v2.4s
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    cmgt v1.4s, v5.4s, v4.4s
+; CHECK-SD-NEXT:    bif v0.16b, v2.16b, v1.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v6.16b, v7.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: select_setcc:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmgt v1.4s, v3.4s, v2.4s
+; CHECK-GI-NEXT:    cmgt v2.4s, v5.4s, v4.4s
+; CHECK-GI-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v6.16b, v7.16b
+; CHECK-GI-NEXT:    ret
+  %cmp1 = icmp slt <4 x i32> %a, %b
+  %sext1 = sext <4 x i1> %cmp1 to <4 x i32>
+  %cmp2 = icmp slt <4 x i32> %c, %d
+  %sext2 = sext <4 x i1> %cmp2 to <4 x i32>
+  %cond = icmp slt <4 x i32> %e, %f
+  %sel1 = select <4 x i1> %cond, <4 x i32> %sext1, <4 x i32> %sext2
+  %lt0 = icmp slt <4 x i32> %sel1, zeroinitializer
+  %sel2 = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel2
+}
+
+define <4 x i32> @and_setcc(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: and_setcc:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v2.4s, v3.4s, v2.4s
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: and_setcc:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmgt v1.4s, v3.4s, v2.4s
+; CHECK-GI-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT:    ret
+  %cmp1 = icmp slt <4 x i32> %a, %b
+  %sext1 = sext <4 x i1> %cmp1 to <4 x i32>
+  %cmp2 = icmp slt <4 x i32> %c, %d
+  %sext2 = sext <4 x i1> %cmp2 to <4 x i32>
+  %and = and <4 x i32> %sext1, %sext2
+  %lt0 = icmp slt <4 x i32> %and, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @or_setcc(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: or_setcc:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v2.4s, v3.4s, v2.4s
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: or_setcc:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmgt v1.4s, v3.4s, v2.4s
+; CHECK-GI-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT:    ret
+  %cmp1 = icmp slt <4 x i32> %a, %b
+  %sext1 = sext <4 x i1> %cmp1 to <4 x i32>
+  %cmp2 = icmp slt <4 x i32> %c, %d
+  %sext2 = sext <4 x i1> %cmp2 to <4 x i32>
+  %or = or <4 x i32> %sext1, %sext2
+  %lt0 = icmp slt <4 x i32> %or, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}
+
+define <4 x i32> @xor_setcc(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: xor_setcc:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmgt v2.4s, v3.4s, v2.4s
+; CHECK-SD-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT:    eor v0.16b, v0.16b, v2.16b
+; CHECK-SD-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: xor_setcc:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmgt v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT:    cmgt v1.4s, v3.4s, v2.4s
+; CHECK-GI-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-GI-NEXT:    cmlt v0.4s, v0.4s, #0
+; CHECK-GI-NEXT:    bsl v0.16b, v4.16b, v5.16b
+; CHECK-GI-NEXT:    ret
+  %cmp1 = icmp slt <4 x i32> %a, %b
+  %sext1 = sext <4 x i1> %cmp1 to <4 x i32>
+  %cmp2 = icmp slt <4 x i32> %c, %d
+  %sext2 = sext <4 x i1> %cmp2 to <4 x i32>
+  %xor = xor <4 x i32> %sext1, %sext2
+  %lt0 = icmp slt <4 x i32> %xor, zeroinitializer
+  %sel = select <4 x i1> %lt0, <4 x i32> %x, <4 x i32> %y
+  ret <4 x i32> %sel
+}

diff  --git a/llvm/test/CodeGen/M68k/setcc-redundant.ll b/llvm/test/CodeGen/M68k/setcc-redundant.ll
new file mode 100644
index 0000000000000..0a752db47f92c
--- /dev/null
+++ b/llvm/test/CodeGen/M68k/setcc-redundant.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=m68k < %s | FileCheck %s
+
+define i8 @scalar_setcc_lt0(i8 %a, i8 %b, i8 %x, i8 %y) {
+; CHECK-LABEL: scalar_setcc_lt0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.b (7,%sp), %d0
+; CHECK-NEXT:    sub.b (11,%sp), %d0
+; CHECK-NEXT:    blt .LBB0_1
+; CHECK-NEXT:  ; %bb.2:
+; CHECK-NEXT:    lea (19,%sp), %a0
+; CHECK-NEXT:    move.b (%a0), %d0
+; CHECK-NEXT:    rts
+; CHECK-NEXT:  .LBB0_1:
+; CHECK-NEXT:    lea (15,%sp), %a0
+; CHECK-NEXT:    move.b (%a0), %d0
+; CHECK-NEXT:    rts
+  %cmp = icmp slt i8 %a, %b
+  %sext = sext i1 %cmp to i8
+  %lt0 = icmp slt i8 %sext, 0
+  %sel = select i1 %lt0, i8 %x, i8 %y
+  ret i8 %sel
+}
+
+define i8 @scalar_setcc_ne0(i8 %a, i8 %b, i8 %x, i8 %y) {
+; CHECK-LABEL: scalar_setcc_ne0:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    move.b (7,%sp), %d0
+; CHECK-NEXT:    sub.b (11,%sp), %d0
+; CHECK-NEXT:    blt .LBB1_1
+; CHECK-NEXT:  ; %bb.2:
+; CHECK-NEXT:    lea (19,%sp), %a0
+; CHECK-NEXT:    move.b (%a0), %d0
+; CHECK-NEXT:    rts
+; CHECK-NEXT:  .LBB1_1:
+; CHECK-NEXT:    lea (15,%sp), %a0
+; CHECK-NEXT:    move.b (%a0), %d0
+; CHECK-NEXT:    rts
+  %cmp = icmp slt i8 %a, %b
+  %sext = sext i1 %cmp to i8
+  %ne0 = icmp ne i8 %sext, 0
+  %sel = select i1 %ne0, i8 %x, i8 %y
+  ret i8 %sel
+}


        


More information about the llvm-commits mailing list