[llvm] 41a3b57 - [RISCV] Teach combineDeMorganOfBoolean to handle (and (xor X, 1), (not Y)).

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 25 10:56:04 PDT 2022


Author: Craig Topper
Date: 2022-08-25T10:55:45-07:00
New Revision: 41a3b5739b944076402a9eb670b08d3e34f3f394

URL: https://github.com/llvm/llvm-project/commit/41a3b5739b944076402a9eb670b08d3e34f3f394
DIFF: https://github.com/llvm/llvm-project/commit/41a3b5739b944076402a9eb670b08d3e34f3f394.diff

LOG: [RISCV] Teach combineDeMorganOfBoolean to handle (and (xor X, 1), (not Y)).

SimplifyDemandedBits tries to aggressively turn xor immediates into -1
to match a 'not' instruction. In this case, because X is a boolean, the
upper bits of (xor X, 1) are known to be 0. Because this is an AND
instruction, those bits aren't demanded from the other operand, so
SimplifyDemandedBits can turn (xor Y, 1) into (not Y).

We need to detect that this has happened to enable the DeMorgan
optimization. To do this, we allow one of the xors to use -1 when
the outer operation is And.
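
Below is a minimal standalone sketch (not part of this commit) that
exhaustively checks the scalar identity the combine relies on for
boolean inputs: (and (xor X, 1), (not Y)) is equivalent to
(xor (or X, Y), 1) when X and Y are known to be 0 or 1.

    // demorgan_bool_check.cpp (illustrative only; file name is made up)
    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t X = 0; X <= 1; ++X) {
        for (uint32_t Y = 0; Y <= 1; ++Y) {
          // Pattern before the combine: (and (xor X, 1), (not Y)).
          uint32_t Before = (X ^ 1u) & ~Y;
          // Pattern after the combine: (xor (or X, Y), 1).
          uint32_t After = (X | Y) ^ 1u;
          // For boolean X and Y the two forms agree on every bit.
          assert(Before == After);
        }
      }
      return 0;
    }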

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D132671

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/double-previous-failure.ll
    llvm/test/CodeGen/RISCV/setcc-logic.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b39466eacf8d..b16dee933363 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8429,8 +8429,20 @@ static SDValue combineDeMorganOfBoolean(SDNode *N, SelectionDAG &DAG) {
   if (!N0.hasOneUse() || !N1.hasOneUse())
     return SDValue();
 
-  // RHS of both xors needs to be 1.
-  if (!isOneConstant(N0.getOperand(1)) || !isOneConstant(N1.getOperand(1)))
+  SDValue N01 = N0.getOperand(1);
+  SDValue N11 = N1.getOperand(1);
+
+  // For AND, SimplifyDemandedBits may have turned one of the (xor X, 1) into
+  // (xor X, -1) based on the upper bits of the other operand being 0. If the
+  // operation is And, allow one of the Xors to use -1.
+  if (isOneConstant(N01)) {
+    if (!isOneConstant(N11) && !(IsAnd && isAllOnesConstant(N11)))
+      return SDValue();
+  } else if (isOneConstant(N11)) {
+    // N01 and N11 being 1 was already handled. Handle N11==1 and N01==-1.
+    if (!(IsAnd && isAllOnesConstant(N01)))
+      return SDValue();
+  } else
     return SDValue();
 
   EVT VT = N->getValueType(0);

diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
index 02633174a956..09af162ee4a6 100644
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -28,15 +28,12 @@ define i32 @main() nounwind {
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_1)
 ; RV32IFD-NEXT:    fld ft2, %lo(.LCPI1_1)(a0)
 ; RV32IFD-NEXT:    flt.d a0, ft0, ft1
-; RV32IFD-NEXT:    not a0, a0
 ; RV32IFD-NEXT:    flt.d a1, ft2, ft0
-; RV32IFD-NEXT:    xori a1, a1, 1
-; RV32IFD-NEXT:    and a0, a0, a1
-; RV32IFD-NEXT:    bnez a0, .LBB1_2
+; RV32IFD-NEXT:    or a0, a0, a1
+; RV32IFD-NEXT:    beqz a0, .LBB1_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.then
 ; RV32IFD-NEXT:    call abort@plt
 ; RV32IFD-NEXT:  .LBB1_2: # %if.end
-; RV32IFD-NEXT:    li a0, 0
 ; RV32IFD-NEXT:    call exit@plt
 entry:
   %call = call double @test(double 2.000000e+00)

diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
index b379c8ae1c96..0dfd1731f656 100644
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -123,19 +123,17 @@ define i1 @and_icmp_sge(i32 signext %a, i32 signext %b, i32 signext %c, i32 sign
 ; RV32I-LABEL: and_icmp_sge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    slt a1, a2, a3
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: and_icmp_sge:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    slt a1, a2, a3
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp sge i32 %a, %b
   %cmp2 = icmp sge i32 %c, %d
@@ -147,19 +145,17 @@ define i1 @and_icmp_sle(i32 signext %a, i32 signext %b, i32 signext %c, i32 sign
 ; RV32I-LABEL: and_icmp_sle:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    slt a1, a3, a2
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: and_icmp_sle:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    slt a1, a3, a2
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp sle i32 %a, %b
   %cmp2 = icmp sle i32 %c, %d
@@ -171,19 +167,17 @@ define i1 @and_icmp_uge(i32 signext %a, i32 signext %b, i32 signext %c, i32 sign
 ; RV32I-LABEL: and_icmp_uge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    sltu a1, a2, a3
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: and_icmp_uge:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    sltu a1, a2, a3
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp uge i32 %a, %b
   %cmp2 = icmp uge i32 %c, %d
@@ -195,19 +189,17 @@ define i1 @and_icmp_ule(i32 signext %a, i32 signext %b, i32 signext %c, i32 sign
 ; RV32I-LABEL: and_icmp_ule:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    sltu a1, a3, a2
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: and_icmp_ule:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    sltu a1, a3, a2
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp ule i32 %a, %b
   %cmp2 = icmp ule i32 %c, %d
