[llvm] r314514 - [X86] Improve codegen for inverted overflow checking intrinsics.

Amara Emerson via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 29 06:53:44 PDT 2017


Author: aemerson
Date: Fri Sep 29 06:53:44 2017
New Revision: 314514

URL: http://llvm.org/viewvc/llvm-project?rev=314514&view=rev
Log:
[X86] Improve codegen for inverted overflow checking intrinsics.

Adds a new DAG combine: xor (setcc cc, val), 1 --> setcc (invert(cc), val)
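
Concretely, an inverted overflow check used to materialize the flag and then
flip it with an explicit xor; the combine folds the inversion into the setcc
condition code instead. Before/after for the signed-add case, taken from the
updated test below:

  Before:
    addl %esi, %edi
    seto %al
    xorb $1, %al

  After:
    addl %esi, %edi
    setno %al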

Differential Revision: https://reviews.llvm.org/D38161

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=314514&r1=314513&r2=314514&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Sep 29 06:53:44 2017
@@ -34109,6 +34109,23 @@ static SDValue lowerX86FPLogicOp(SDNode
   return SDValue();
 }
 
+
+/// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
+static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
+  if (N->getOpcode() != ISD::XOR)
+    return SDValue();
+
+  SDValue LHS = N->getOperand(0);
+  auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC)
+    return SDValue();
+
+  X86::CondCode NewCC = X86::GetOppositeBranchCondition(
+      X86::CondCode(LHS->getConstantOperandVal(0)));
+  SDLoc DL(N);
+  return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
+}
+
 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
@@ -34118,6 +34135,9 @@ static SDValue combineXor(SDNode *N, Sel
   if (DCI.isBeforeLegalizeOps())
     return SDValue();
 
+  if (SDValue SetCC = foldXor1SetCC(N, DAG))
+    return SetCC;
+
   if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
     return RV;
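
In DAG terms, the node pattern matched above looks roughly like the following
(a schematic sketch, not an actual -debug dump; "flags" stands for whatever
EFLAGS-producing node feeds the setcc). Note that operand 0 of X86ISD::SETCC is
the condition-code constant and operand 1 is the flags value, which is why the
code reads getConstantOperandVal(0) and getOperand(1):

  t1: i8 = X86ISD::SETCC seto, flags
  t2: i8 = xor t1, Constant:i8<1>
    -->
  t2: i8 = X86ISD::SETCC setno, flags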
 

Modified: llvm/trunk/test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll?rev=314514&r1=314513&r2=314514&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/overflow-intrinsic-setcc-fold.ll Fri Sep 29 06:53:44 2017
@@ -5,8 +5,7 @@ define i1 @saddo_not_i32(i32 %v1, i32 %v
 ; CHECK-LABEL: saddo_not_i32:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    addl %esi, %edi
-; CHECK-NEXT:    seto %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setno %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -19,8 +18,7 @@ define i1 @saddo_not_i64(i64 %v1, i64 %v
 ; CHECK-LABEL: saddo_not_i64:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    addq %rsi, %rdi
-; CHECK-NEXT:    seto %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setno %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -33,8 +31,7 @@ define i1 @uaddo_not_i32(i32 %v1, i32 %v
 ; CHECK-LABEL: uaddo_not_i32:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    addl %esi, %edi
-; CHECK-NEXT:    setb %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setae %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -47,8 +44,7 @@ define i1 @uaddo_not_i64(i64 %v1, i64 %v
 ; CHECK-LABEL: uaddo_not_i64:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    addq %rsi, %rdi
-; CHECK-NEXT:    setb %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setae %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -61,8 +57,7 @@ define i1 @ssubo_not_i32(i32 %v1, i32 %v
 ; CHECK-LABEL: ssubo_not_i32:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    cmpl %esi, %edi
-; CHECK-NEXT:    seto %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setno %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -75,8 +70,7 @@ define i1 @ssub_not_i64(i64 %v1, i64 %v2
 ; CHECK-LABEL: ssub_not_i64:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    cmpq %rsi, %rdi
-; CHECK-NEXT:    seto %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setno %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -89,8 +83,7 @@ define i1 @usubo_not_i32(i32 %v1, i32 %v
 ; CHECK-LABEL: usubo_not_i32:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    cmpl %esi, %edi
-; CHECK-NEXT:    setb %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setae %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -103,8 +96,7 @@ define i1 @usubo_not_i64(i64 %v1, i64 %v
 ; CHECK-LABEL: usubo_not_i64:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    cmpq %rsi, %rdi
-; CHECK-NEXT:    setb %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setae %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -117,8 +109,7 @@ define i1 @smulo_not_i32(i32 %v1, i32 %v
 ; CHECK-LABEL: smulo_not_i32:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    imull %esi, %edi
-; CHECK-NEXT:    seto %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setno %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
@@ -131,8 +122,7 @@ define i1 @smulo_not_i64(i64 %v1, i64 %v
 ; CHECK-LABEL: smulo_not_i64:
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    imulq %rsi, %rdi
-; CHECK-NEXT:    seto %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setno %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
@@ -146,8 +136,7 @@ define i1 @umulo_not_i32(i32 %v1, i32 %v
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    mull %esi
-; CHECK-NEXT:    seto %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setno %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
@@ -161,8 +150,7 @@ define i1 @umulo_not_i64(i64 %v1, i64 %v
 ; CHECK:       ## BB#0: ## %entry
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    mulq %rsi
-; CHECK-NEXT:    seto %al
-; CHECK-NEXT:    xorb $1, %al
+; CHECK-NEXT:    setno %al
 ; CHECK-NEXT:    retq
 entry:
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
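
For reference, each test follows the same shape; the diff context shows only
the intrinsic call, but the omitted function body is the standard inverted
overflow check (reconstructed here for illustration, using saddo_not_i32):

  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
  %obit = extractvalue {i32, i1} %t, 1
  %ret = xor i1 %obit, true
  ret i1 %ret

It is the xor of the i1 overflow bit with true that, after legalization,
becomes the xor-by-1 of an X86ISD::SETCC node that foldXor1SetCC now matches.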
