[llvm] e68257f - [RISCV][SelectionDAG] Enable TargetLowering::hasBitTest for masks that fit in ANDI.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 28 12:46:51 PDT 2022


Author: Craig Topper
Date: 2022-03-28T12:46:36-07:00
New Revision: e68257fceee7e811d0c554f164705da2959c7519

URL: https://github.com/llvm/llvm-project/commit/e68257fceee7e811d0c554f164705da2959c7519
DIFF: https://github.com/llvm/llvm-project/commit/e68257fceee7e811d0c554f164705da2959c7519.diff

LOG: [RISCV][SelectionDAG] Enable TargetLowering::hasBitTest for masks that fit in ANDI.

Modified DAGCombiner to pass the bit-test input and the shift amount
to hasBitTest. This matches the other call to hasBitTest in TargetLowering.h.

This is an alternative to D122454.

Reviewed By: luismarques

Differential Revision: https://reviews.llvm.org/D122458
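
For reference, here is a minimal standalone C++ sketch (not LLVM code; the
helper name is hypothetical) of the reasoning behind the ule(10) bound in the
new RISCVTargetLowering::hasBitTest hook. The combine rewrites
(not (srl X, C)) & 1 into (X & (1 << C)) == 0, and on RISC-V that form is only
profitable when the mask 1 << C fits in ANDI's 12-bit signed immediate, i.e.
when C <= 10:

  #include <cassert>
  #include <cstdint>

  // Hypothetical helper, not part of LLVM: returns true if testing bit
  // `BitPos` can be lowered as ANDI+SEQZ/SNEZ instead of SRLI+NOT+ANDI.
  bool maskFitsInAndi(unsigned BitPos) {
    uint64_t Mask = uint64_t(1) << BitPos; // mask produced by the combine
    return Mask <= 2047;                   // largest positive ANDI immediate
  }

  int main() {
    assert(maskFitsInAndi(10));  // 1 << 10 == 1024 fits in ANDI
    assert(!maskFitsInAndi(11)); // 1 << 11 == 2048 does not, so keep the
                                 // shift form (or BEXTI with Zbs)
    return 0;
  }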

Added: 
    llvm/test/CodeGen/RISCV/bittest.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/test/CodeGen/RISCV/rv32zbs.ll
    llvm/test/CodeGen/RISCV/rv64zbs.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index c3b12f8b923fa..585872065ad49 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5927,6 +5927,9 @@ static SDValue combineShiftAnd1ToBitTest(SDNode *And, SelectionDAG &DAG) {
   if (ShiftAmt.uge(VTBitWidth))
     return SDValue();
 
+  if (!TLI.hasBitTest(Srl.getOperand(0), Srl.getOperand(1)))
+    return SDValue();
+
   // Turn this into a bit-test pattern using mask op + setcc:
   // and (not (srl X, C)), 1 --> (and X, 1<<C) == 0
   SDLoc DL(And);
@@ -6352,9 +6355,8 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
   if (SDValue Shifts = unfoldExtremeBitClearingToShifts(N))
     return Shifts;
 
-  if (TLI.hasBitTest(N0, N1))
-    if (SDValue V = combineShiftAnd1ToBitTest(N, DAG))
-      return V;
+  if (SDValue V = combineShiftAnd1ToBitTest(N, DAG))
+    return V;
 
   // Recognize the following pattern:
   //

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b8043048d73e7..862fa1d8a3515 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1245,6 +1245,12 @@ bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
          !isa<ConstantSDNode>(Y);
 }
 
+bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
+  // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
+  auto *C = dyn_cast<ConstantSDNode>(Y);
+  return C && C->getAPIntValue().ule(10);
+}
+
 /// Check if sinking \p I's operands to I's basic block is profitable, because
 /// the operands can be folded into a target instruction, e.g.
 /// splats of scalars can fold into vector instructions.

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 8a4c83f346060..cc0c4ccdeac89 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -346,6 +346,7 @@ class RISCVTargetLowering : public TargetLowering {
   bool isCheapToSpeculateCttz() const override;
   bool isCheapToSpeculateCtlz() const override;
   bool hasAndNotCompare(SDValue Y) const override;
+  bool hasBitTest(SDValue X, SDValue Y) const override;
   bool shouldSinkOperands(Instruction *I,
                           SmallVectorImpl<Use *> &Ops) const override;
   bool isFPImmLegal(const APFloat &Imm, EVT VT,

diff --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll
new file mode 100644
index 0000000000000..45676348ebe07
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/bittest.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I
+; RUN: llc -mtriple=riscv64 -mattr=+zbs -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ZBS
+
+define signext i32 @bittest_7_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: bittest_7_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 128
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_7_i32:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    andi a0, a0, 128
+; RV64ZBS-NEXT:    seqz a0, a0
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i32 %a, 7
+  %not = xor i32 %shr, -1
+  %and = and i32 %not, 1
+  ret i32 %and
+}
+
+define signext i32 @bittest_10_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: bittest_10_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1024
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_10_i32:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    andi a0, a0, 1024
+; RV64ZBS-NEXT:    seqz a0, a0
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i32 %a, 10
+  %not = xor i32 %shr, -1
+  %and = and i32 %not, 1
+  ret i32 %and
+}
+
+define signext i32 @bittest_11_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: bittest_11_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 11
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_11_i32:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    bexti a0, a0, 11
+; RV64ZBS-NEXT:    xori a0, a0, 1
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i32 %a, 11
+  %not = xor i32 %shr, -1
+  %and = and i32 %not, 1
+  ret i32 %and
+}
+
+define signext i32 @bittest_31_i32(i32 signext %a) nounwind {
+; RV64I-LABEL: bittest_31_i32:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    srliw a0, a0, 31
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_31_i32:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    not a0, a0
+; RV64ZBS-NEXT:    srliw a0, a0, 31
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i32 %a, 31
+  %not = xor i32 %shr, -1
+  %and = and i32 %not, 1
+  ret i32 %and
+}
+
+define i64 @bittest_7_i64(i64 %a) nounwind {
+; RV64I-LABEL: bittest_7_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 128
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_7_i64:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    andi a0, a0, 128
+; RV64ZBS-NEXT:    seqz a0, a0
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i64 %a, 7
+  %not = xor i64 %shr, -1
+  %and = and i64 %not, 1
+  ret i64 %and
+}
+
+define i64 @bittest_10_i64(i64 %a) nounwind {
+; RV64I-LABEL: bittest_10_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a0, a0, 1024
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_10_i64:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    andi a0, a0, 1024
+; RV64ZBS-NEXT:    seqz a0, a0
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i64 %a, 10
+  %not = xor i64 %shr, -1
+  %and = and i64 %not, 1
+  ret i64 %and
+}
+
+define i64 @bittest_11_i64(i64 %a) nounwind {
+; RV64I-LABEL: bittest_11_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 11
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_11_i64:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    bexti a0, a0, 11
+; RV64ZBS-NEXT:    xori a0, a0, 1
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i64 %a, 11
+  %not = xor i64 %shr, -1
+  %and = and i64 %not, 1
+  ret i64 %and
+}
+
+define i64 @bittest_31_i64(i64 %a) nounwind {
+; RV64I-LABEL: bittest_31_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 31
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_31_i64:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    bexti a0, a0, 31
+; RV64ZBS-NEXT:    xori a0, a0, 1
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i64 %a, 31
+  %not = xor i64 %shr, -1
+  %and = and i64 %not, 1
+  ret i64 %and
+}
+
+define i64 @bittest_32_i64(i64 %a) nounwind {
+; RV64I-LABEL: bittest_32_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srli a0, a0, 32
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    andi a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_32_i64:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    bexti a0, a0, 32
+; RV64ZBS-NEXT:    xori a0, a0, 1
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i64 %a, 32
+  %not = xor i64 %shr, -1
+  %and = and i64 %not, 1
+  ret i64 %and
+}
+
+define i64 @bittest_63_i64(i64 %a) nounwind {
+; RV64I-LABEL: bittest_63_i64:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    not a0, a0
+; RV64I-NEXT:    srli a0, a0, 63
+; RV64I-NEXT:    ret
+;
+; RV64ZBS-LABEL: bittest_63_i64:
+; RV64ZBS:       # %bb.0:
+; RV64ZBS-NEXT:    not a0, a0
+; RV64ZBS-NEXT:    srli a0, a0, 63
+; RV64ZBS-NEXT:    ret
+  %shr = lshr i64 %a, 63
+  %not = xor i64 %shr, -1
+  %and = and i64 %not, 1
+  ret i64 %and
+}

diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll
index ee05d1152f37b..cef34fb2c13ca 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll
@@ -355,86 +355,6 @@ define i64 @bexti_i64(i64 %a) nounwind {
   ret i64 %and
 }
 
-define i32 @bexti_xor_i32(i32 %a) nounwind {
-; RV32I-LABEL: bexti_xor_i32:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a0, a0, 7
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    ret
-;
-; RV32ZBS-LABEL: bexti_xor_i32:
-; RV32ZBS:       # %bb.0:
-; RV32ZBS-NEXT:    bexti a0, a0, 7
-; RV32ZBS-NEXT:    xori a0, a0, 1
-; RV32ZBS-NEXT:    ret
-  %shr = lshr i32 %a, 7
-  %not = xor i32 %shr, -1
-  %and = and i32 %not, 1
-  ret i32 %and
-}
-
-define i64 @bexti_xor_i64(i64 %a) nounwind {
-; RV32I-LABEL: bexti_xor_i64:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a0, a0, 7
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    ret
-;
-; RV32ZBS-LABEL: bexti_xor_i64:
-; RV32ZBS:       # %bb.0:
-; RV32ZBS-NEXT:    bexti a0, a0, 7
-; RV32ZBS-NEXT:    xori a0, a0, 1
-; RV32ZBS-NEXT:    li a1, 0
-; RV32ZBS-NEXT:    ret
-  %shr = lshr i64 %a, 7
-  %not = xor i64 %shr, -1
-  %and = and i64 %not, 1
-  ret i64 %and
-}
-
-define i32 @bexti_xor_i32_1(i32 %a) nounwind {
-; RV32I-LABEL: bexti_xor_i32_1:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a0, a0, 7
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    ret
-;
-; RV32ZBS-LABEL: bexti_xor_i32_1:
-; RV32ZBS:       # %bb.0:
-; RV32ZBS-NEXT:    bexti a0, a0, 7
-; RV32ZBS-NEXT:    xori a0, a0, 1
-; RV32ZBS-NEXT:    ret
-  %shr = lshr i32 %a, 7
-  %and = and i32 %shr, 1
-  %xor = xor i32 %and, 1
-  ret i32 %xor
-}
-
-define i64 @bexti_xor_i64_1(i64 %a) nounwind {
-; RV32I-LABEL: bexti_xor_i64_1:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    srli a0, a0, 7
-; RV32I-NEXT:    not a0, a0
-; RV32I-NEXT:    andi a0, a0, 1
-; RV32I-NEXT:    li a1, 0
-; RV32I-NEXT:    ret
-;
-; RV32ZBS-LABEL: bexti_xor_i64_1:
-; RV32ZBS:       # %bb.0:
-; RV32ZBS-NEXT:    bexti a0, a0, 7
-; RV32ZBS-NEXT:    xori a0, a0, 1
-; RV32ZBS-NEXT:    li a1, 0
-; RV32ZBS-NEXT:    ret
-  %shr = lshr i64 %a, 7
-  %and = and i64 %shr, 1
-  %xor = xor i64 %and, 1
-  ret i64 %xor
-}
-
 define i32 @bclri_i32_10(i32 %a) nounwind {
 ; RV32I-LABEL: bclri_i32_10:
 ; RV32I:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
index e8c8e53d86350..e7eb1390c9778 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -438,82 +438,6 @@ define i64 @bexti_i64(i64 %a) nounwind {
   ret i64 %and
 }
 
-define signext i32 @bexti_xor_i32(i32 signext %a) nounwind {
-; RV64I-LABEL: bexti_xor_i32:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 7
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    andi a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV64ZBS-LABEL: bexti_xor_i32:
-; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    bexti a0, a0, 7
-; RV64ZBS-NEXT:    xori a0, a0, 1
-; RV64ZBS-NEXT:    ret
-  %shr = lshr i32 %a, 7
-  %not = xor i32 %shr, -1
-  %and = and i32 %not, 1
-  ret i32 %and
-}
-
-define i64 @bexti_xor_i64(i64 %a) nounwind {
-; RV64I-LABEL: bexti_xor_i64:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 7
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    andi a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV64ZBS-LABEL: bexti_xor_i64:
-; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    bexti a0, a0, 7
-; RV64ZBS-NEXT:    xori a0, a0, 1
-; RV64ZBS-NEXT:    ret
-  %shr = lshr i64 %a, 7
-  %not = xor i64 %shr, -1
-  %and = and i64 %not, 1
-  ret i64 %and
-}
-
-define signext i32 @bexti_xor_i32_1(i32 signext %a) nounwind {
-; RV64I-LABEL: bexti_xor_i32_1:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 7
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    andi a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV64ZBS-LABEL: bexti_xor_i32_1:
-; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    bexti a0, a0, 7
-; RV64ZBS-NEXT:    xori a0, a0, 1
-; RV64ZBS-NEXT:    ret
-  %shr = lshr i32 %a, 7
-  %and = and i32 %shr, 1
-  %xor = xor i32 %and, 1
-  ret i32 %xor
-}
-
-define i64 @bexti_xor_i64_1(i64 %a) nounwind {
-; RV64I-LABEL: bexti_xor_i64_1:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    srli a0, a0, 7
-; RV64I-NEXT:    not a0, a0
-; RV64I-NEXT:    andi a0, a0, 1
-; RV64I-NEXT:    ret
-;
-; RV64ZBS-LABEL: bexti_xor_i64_1:
-; RV64ZBS:       # %bb.0:
-; RV64ZBS-NEXT:    bexti a0, a0, 7
-; RV64ZBS-NEXT:    xori a0, a0, 1
-; RV64ZBS-NEXT:    ret
-  %shr = lshr i64 %a, 7
-  %and = and i64 %shr, 1
-  %xor = xor i64 %and, 1
-  ret i64 %xor
-}
-
 define signext i32 @bclri_i32_10(i32 signext %a) nounwind {
 ; RV64I-LABEL: bclri_i32_10:
 ; RV64I:       # %bb.0:


        

