[llvm] ec91d76 - [RISCV] Apply DeMorgan's law to (and/or (xor X, 1), (xor Y, 1)) if X and Y are 0/1.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 25 08:54:51 PDT 2022


Author: Craig Topper
Date: 2022-08-25T08:49:30-07:00
New Revision: ec91d761ac03378456fe0528e0fe8ea4dd70927c

URL: https://github.com/llvm/llvm-project/commit/ec91d761ac03378456fe0528e0fe8ea4dd70927c
DIFF: https://github.com/llvm/llvm-project/commit/ec91d761ac03378456fe0528e0fe8ea4dd70927c.diff

LOG: [RISCV] Apply DeMorgan's law to (and/or (xor X, 1), (xor Y, 1)) if X and Y are 0/1.

This optimizes xors that appear due to legalizing setge/setle which
require an xor with 1. This reduces the number of xors and may
allow the xor to fold with a beqz or bnez.

Differential Revision: https://reviews.llvm.org/D132614

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/setcc-logic.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0a405f1b5e0de..b39466eacf8d9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8415,8 +8415,46 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
 }
 
-static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
+// Apply DeMorgan's law to (and/or (xor X, 1), (xor Y, 1)) if X and Y are 0/1.
+// Legalizing setcc can introduce xors like this. Doing this transform reduces
+// the number of xors and may allow the xor to fold into a branch condition.
+static SDValue combineDeMorganOfBoolean(SDNode *N, SelectionDAG &DAG) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  bool IsAnd = N->getOpcode() == ISD::AND;
+
+  if (N0.getOpcode() != ISD::XOR || N1.getOpcode() != ISD::XOR)
+    return SDValue();
+
+  if (!N0.hasOneUse() || !N1.hasOneUse())
+    return SDValue();
+
+  // RHS of both xors needs to be 1.
+  if (!isOneConstant(N0.getOperand(1)) || !isOneConstant(N1.getOperand(1)))
+    return SDValue();
+
+  EVT VT = N->getValueType(0);
+
+  SDValue N00 = N0.getOperand(0);
+  SDValue N10 = N1.getOperand(0);
+
+  // The LHS of the xors needs to be 0/1.
+  APInt Mask = APInt::getBitsSetFrom(VT.getSizeInBits(), 1);
+  if (!DAG.MaskedValueIsZero(N00, Mask) || !DAG.MaskedValueIsZero(N10, Mask))
+    return SDValue();
+
+  // Invert the opcode and insert a new xor.
+  SDLoc DL(N);
+  unsigned Opc = IsAnd ? ISD::OR : ISD::AND;
+  SDValue Logic = DAG.getNode(Opc, DL, VT, N00, N10);
+  return DAG.getNode(ISD::XOR, DL, VT, Logic, DAG.getConstant(1, DL, VT));
+}
+
+static SDValue performANDCombine(SDNode *N,
+                                 TargetLowering::DAGCombinerInfo &DCI,
                                  const RISCVSubtarget &Subtarget) {
+  SelectionDAG &DAG = DCI.DAG;
+
   SDValue N0 = N->getOperand(0);
   // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero
   // extending X. This is safe since we only need the LSB after the shift and
@@ -8439,13 +8477,19 @@ static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineBinOpToReduce(N, DAG))
     return V;
 
+  if (DCI.isAfterLegalizeDAG())
+    if (SDValue V = combineDeMorganOfBoolean(N, DAG))
+      return V;
+
   // fold (and (select lhs, rhs, cc, -1, y), x) ->
   //      (select lhs, rhs, cc, x, (and x, y))
   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
 }
 
-static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
+static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                 const RISCVSubtarget &Subtarget) {
+  SelectionDAG &DAG = DCI.DAG;
+
   if (Subtarget.hasStdExtZbp()) {
     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
       return GREV;
@@ -8457,6 +8501,11 @@ static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
 
   if (SDValue V = combineBinOpToReduce(N, DAG))
     return V;
+
+  if (DCI.isAfterLegalizeDAG())
+    if (SDValue V = combineDeMorganOfBoolean(N, DAG))
+      return V;
+
   // fold (or (select cond, 0, y), x) ->
   //      (select cond, x, (or x, y))
   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
@@ -9349,9 +9398,9 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::SUB:
     return performSUBCombine(N, DAG);
   case ISD::AND:
-    return performANDCombine(N, DAG, Subtarget);
+    return performANDCombine(N, DCI, Subtarget);
   case ISD::OR:
-    return performORCombine(N, DAG, Subtarget);
+    return performORCombine(N, DCI, Subtarget);
   case ISD::XOR:
     return performXORCombine(N, DAG);
   case ISD::FADD:

diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
index c6316609df2cf..b379c8ae1c962 100644
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -219,19 +219,17 @@ define i1 @or_icmp_sge(i32 signext %a, i32 signext %b, i32 signext %c, i32 signe
 ; RV32I-LABEL: or_icmp_sge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    slt a1, a2, a3
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_icmp_sge:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    slt a1, a2, a3
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp sge i32 %a, %b
   %cmp2 = icmp sge i32 %c, %d
@@ -243,19 +241,17 @@ define i1 @or_icmp_sle(i32 signext %a, i32 signext %b, i32 signext %c, i32 signe
 ; RV32I-LABEL: or_icmp_sle:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    slt a1, a3, a2
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_icmp_sle:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    slt a1, a3, a2
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp sle i32 %a, %b
   %cmp2 = icmp sle i32 %c, %d
@@ -267,19 +263,17 @@ define i1 @or_icmp_uge(i32 signext %a, i32 signext %b, i32 signext %c, i32 signe
 ; RV32I-LABEL: or_icmp_uge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    sltu a1, a2, a3
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_icmp_uge:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    sltu a1, a2, a3
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp uge i32 %a, %b
   %cmp2 = icmp uge i32 %c, %d
@@ -291,19 +285,17 @@ define i1 @or_icmp_ule(i32 signext %a, i32 signext %b, i32 signext %c, i32 signe
 ; RV32I-LABEL: or_icmp_ule:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    sltu a1, a3, a2
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_icmp_ule:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    sltu a1, a3, a2
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp ule i32 %a, %b
   %cmp2 = icmp ule i32 %c, %d


        


More information about the llvm-commits mailing list