[llvm] 2c6c169 - [ARM] Optimise ASRL/LSRL to smaller shifts using demanded bits.

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Fri Mar 13 03:09:25 PDT 2020


Author: David Green
Date: 2020-03-13T10:09:03Z
New Revision: 2c6c169dbd6041b4575b2234c532aad50a472e81

URL: https://github.com/llvm/llvm-project/commit/2c6c169dbd6041b4575b2234c532aad50a472e81
DIFF: https://github.com/llvm/llvm-project/commit/2c6c169dbd6041b4575b2234c532aad50a472e81.diff

LOG: [ARM] Optimise ASRL/LSRL to smaller shifts using demanded bits.

The ASRL/LSRL long shifts are generated from 64-bit shifts. Once we have
them, it might turn out that enough of the 64-bit result is unused that
the same value can be produced with a smaller shift. As the smaller
shift can in general be folded in more ways, such as into the add
instruction in one of the test cases here, we can use demanded-bits
analysis to prefer the smaller shift where possible.

Differential Revision: https://reviews.llvm.org/D75371
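
To make the combine concrete: for a 64-bit right shift by 31 whose low
32-bit result then has bit 0 masked off, every bit that survives comes
from the high word, so a plain 32-bit left shift by one gives the same
value. A minimal sketch of that equivalence (illustration only, not part
of the commit):

#include <cassert>
#include <cstdint>

// Low 32 bits of a 64-bit shift right by 31, as the ASRL/LSRL pair
// computes them: (lo >> 31) | (hi << 1).
uint32_t long_shift_low(uint64_t x) { return (uint32_t)(x >> 31); }

// When only bits [1,31] are demanded (bit 0 is masked off), the low
// word's contribution is dead, so shifting the high word left by
// 32 - 31 = 1 is equivalent.
uint32_t small_shift(uint64_t x) { return (uint32_t)(x >> 32) << 1; }

int main() {
  for (uint64_t x : {0ull, 1ull, 0x80000000ull, 0xdeadbeefcafef00dull, ~0ull})
    assert((long_shift_low(x) & ~1u) == small_shift(x));
}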

Added: 
    

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp
    llvm/lib/Target/ARM/ARMISelLowering.h
    llvm/test/CodeGen/Thumb2/fir.ll
    llvm/test/CodeGen/Thumb2/shift_parts.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index d5da047a1ca8..858ea8843162 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -16229,6 +16229,35 @@ ARMTargetLowering::targetShrinkDemandedConstant(SDValue Op,
   return false;
 }
 
+bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode(
+    SDValue Op, const APInt &OriginalDemandedBits,
+    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
+    unsigned Depth) const {
+  unsigned Opc = Op.getOpcode();
+
+  switch (Opc) {
+  case ARMISD::ASRL:
+  case ARMISD::LSRL: {
+    // If this is result 0 and the other result is unused, see if the demand
+    // bits allow us to shrink this long shift into a standard small shift in
+    // the opposite direction.
+    if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) &&
+        isa<ConstantSDNode>(Op->getOperand(2))) {
+      unsigned ShAmt = Op->getConstantOperandVal(2);
+      if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(
+                            APInt::getAllOnesValue(32) << (32 - ShAmt)))
+        return TLO.CombineTo(
+            Op, TLO.DAG.getNode(
+                    ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1),
+                    TLO.DAG.getConstant(32 - ShAmt, SDLoc(Op), MVT::i32)));
+    }
+    break;
+  }
+  }
+
+  return TargetLowering::SimplifyDemandedBitsForTargetNode(
+      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
+}
 
 //===----------------------------------------------------------------------===//
 //                           ARM Inline Assembly Support

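A note on the guard in the new hunk: the transform is only safe when
every demanded bit of the low result lies in the top ShAmt bits, i.e.
inside the mask built by APInt::getAllOnesValue(32) << (32 - ShAmt),
because those are the only bits the high word contributes. A standalone
restatement of that predicate in plain C++ (a sketch; the extra
ShAmt == 0 guard is needed here because C++ shifts, unlike APInt's,
cannot shift by the full bit width):

#include <cstdint>

// May the low result of a 64-bit right shift by ShAmt be rewritten as
// Hi << (32 - ShAmt)? Yes when the demanded bits are a subset of the
// top ShAmt bits -- the same condition the isSubsetOf check expresses.
bool canShrinkLongShift(uint32_t DemandedBits, unsigned ShAmt) {
  if (ShAmt == 0 || ShAmt >= 32)
    return false;
  uint32_t TopMask = ~0u << (32 - ShAmt);
  return (DemandedBits & ~TopMask) == 0;
}
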
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 61a8d53ac6e6..99694cd9e030 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -354,6 +354,13 @@ class VectorType;
     SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
     SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
 
+    bool SimplifyDemandedBitsForTargetNode(SDValue Op,
+                                           const APInt &OriginalDemandedBits,
+                                           const APInt &OriginalDemandedElts,
+                                           KnownBits &Known,
+                                           TargetLoweringOpt &TLO,
+                                           unsigned Depth) const override;
+
     bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;
 
     /// allowsMisalignedMemoryAccesses - Returns true if the target allows

diff --git a/llvm/test/CodeGen/Thumb2/fir.ll b/llvm/test/CodeGen/Thumb2/fir.ll
index d03fb3d6279f..c3b4a57bc9fd 100644
--- a/llvm/test/CodeGen/Thumb2/fir.ll
+++ b/llvm/test/CodeGen/Thumb2/fir.ll
@@ -3,27 +3,15 @@
 ; RUN: llc --verify-machineinstrs -mtriple=thumbv8.1m.main-none-eabi -mattr=+dsp %s -o - | FileCheck %s -check-prefix=CHECK --check-prefix=CHECK-NOMVE
 
 define void @test1(i32* %p0, i32 *%p1, i32 *%p2, i32 *%pDst) {
-; CHECK-MVE-LABEL: test1:
-; CHECK-MVE:       @ %bb.0: @ %entry
-; CHECK-MVE-NEXT:    ldr r1, [r1]
-; CHECK-MVE-NEXT:    ldr r2, [r2]
-; CHECK-MVE-NEXT:    ldr r0, [r0]
-; CHECK-MVE-NEXT:    smull r2, r1, r2, r1
-; CHECK-MVE-NEXT:    lsrl r2, r1, #31
-; CHECK-MVE-NEXT:    bic r1, r2, #1
-; CHECK-MVE-NEXT:    add r0, r1
-; CHECK-MVE-NEXT:    str r0, [r3]
-; CHECK-MVE-NEXT:    bx lr
-;
-; CHECK-NOMVE-LABEL: test1:
-; CHECK-NOMVE:       @ %bb.0: @ %entry
-; CHECK-NOMVE-NEXT:    ldr r1, [r1]
-; CHECK-NOMVE-NEXT:    ldr r2, [r2]
-; CHECK-NOMVE-NEXT:    ldr r0, [r0]
-; CHECK-NOMVE-NEXT:    smmul r1, r2, r1
-; CHECK-NOMVE-NEXT:    add.w r0, r0, r1, lsl #1
-; CHECK-NOMVE-NEXT:    str r0, [r3]
-; CHECK-NOMVE-NEXT:    bx lr
+; CHECK-LABEL: test1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    ldr r1, [r1]
+; CHECK-NEXT:    ldr r2, [r2]
+; CHECK-NEXT:    ldr r0, [r0]
+; CHECK-NEXT:    smmul r1, r2, r1
+; CHECK-NEXT:    add.w r0, r0, r1, lsl #1
+; CHECK-NEXT:    str r0, [r3]
+; CHECK-NEXT:    bx lr
 entry:
   %l3 = load i32, i32* %p0, align 4
   %l4 = load i32, i32* %p1, align 4

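The fir.ll diff above shows why the smaller shift is preferable: once
the bic has established that only bits [1,31] of the shifted product are
demanded, the backend can take the product's high word directly (smmul)
and fold the left-shift-by-one into the add (add.w ..., lsl #1),
replacing the smull/lsrl/bic sequence. In C terms, a sketch of the
pattern (the function name is made up for illustration):

#include <cstdint>

int32_t fir_tap(int32_t acc, int32_t a, int32_t b) {
  int64_t prod = (int64_t)a * b;
  // Before: acc + ((uint32_t)(prod >> 31) & ~1u)  -> smull; lsrl; bic; add
  // After:  acc + (high word of prod) * 2         -> smmul; add.w lsl #1
  uint32_t hi = (uint32_t)((uint64_t)prod >> 32);
  return (int32_t)((uint32_t)acc + (hi << 1)); // unsigned add avoids UB
}
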
diff --git a/llvm/test/CodeGen/Thumb2/shift_parts.ll b/llvm/test/CodeGen/Thumb2/shift_parts.ll
index 721c63ddf3ac..d32b386c5910 100644
--- a/llvm/test/CodeGen/Thumb2/shift_parts.ll
+++ b/llvm/test/CodeGen/Thumb2/shift_parts.ll
@@ -422,16 +422,10 @@ entry:
 
 
 define i32 @ashr_demand_bottommask(i64 %x) {
-; CHECK-MVE-LABEL: ashr_demand_bottommask:
-; CHECK-MVE:       @ %bb.0: @ %entry
-; CHECK-MVE-NEXT:    lsrl r0, r1, #31
-; CHECK-MVE-NEXT:    bic r0, r0, #1
-; CHECK-MVE-NEXT:    bx lr
-;
-; CHECK-NON-MVE-LABEL: ashr_demand_bottommask:
-; CHECK-NON-MVE:       @ %bb.0: @ %entry
-; CHECK-NON-MVE-NEXT:    lsls r0, r1, #1
-; CHECK-NON-MVE-NEXT:    bx lr
+; CHECK-LABEL: ashr_demand_bottommask:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsls r0, r1, #1
+; CHECK-NEXT:    bx lr
 entry:
   %shr = ashr i64 %x, 31
   %t = trunc i64 %shr to i32
@@ -440,16 +434,10 @@ entry:
 }
 
 define i32 @lshr_demand_bottommask(i64 %x) {
-; CHECK-MVE-LABEL: lshr_demand_bottommask:
-; CHECK-MVE:       @ %bb.0: @ %entry
-; CHECK-MVE-NEXT:    lsrl r0, r1, #31
-; CHECK-MVE-NEXT:    bic r0, r0, #1
-; CHECK-MVE-NEXT:    bx lr
-;
-; CHECK-NON-MVE-LABEL: lshr_demand_bottommask:
-; CHECK-NON-MVE:       @ %bb.0: @ %entry
-; CHECK-NON-MVE-NEXT:    lsls r0, r1, #1
-; CHECK-NON-MVE-NEXT:    bx lr
+; CHECK-LABEL: lshr_demand_bottommask:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    lsls r0, r1, #1
+; CHECK-NEXT:    bx lr
 entry:
   %shr = lshr i64 %x, 31
   %t = trunc i64 %shr to i32
@@ -470,17 +458,11 @@ entry:
 }
 
 define i32 @ashr_demand_bottommask2(i64 %x) {
-; CHECK-MVE-LABEL: ashr_demand_bottommask2:
-; CHECK-MVE:       @ %bb.0: @ %entry
-; CHECK-MVE-NEXT:    lsrl r0, r1, #31
-; CHECK-MVE-NEXT:    bic r0, r0, #3
-; CHECK-MVE-NEXT:    bx lr
-;
-; CHECK-NON-MVE-LABEL: ashr_demand_bottommask2:
-; CHECK-NON-MVE:       @ %bb.0: @ %entry
-; CHECK-NON-MVE-NEXT:    mvn r0, #2
-; CHECK-NON-MVE-NEXT:    and.w r0, r0, r1, lsl #1
-; CHECK-NON-MVE-NEXT:    bx lr
+; CHECK-LABEL: ashr_demand_bottommask2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r0, #2
+; CHECK-NEXT:    and.w r0, r0, r1, lsl #1
+; CHECK-NEXT:    bx lr
 entry:
   %shr = ashr i64 %x, 31
   %t = trunc i64 %shr to i32
@@ -489,17 +471,11 @@ entry:
 }
 
 define i32 @lshr_demand_bottommask2(i64 %x) {
-; CHECK-MVE-LABEL: lshr_demand_bottommask2:
-; CHECK-MVE:       @ %bb.0: @ %entry
-; CHECK-MVE-NEXT:    lsrl r0, r1, #31
-; CHECK-MVE-NEXT:    bic r0, r0, #3
-; CHECK-MVE-NEXT:    bx lr
-;
-; CHECK-NON-MVE-LABEL: lshr_demand_bottommask2:
-; CHECK-NON-MVE:       @ %bb.0: @ %entry
-; CHECK-NON-MVE-NEXT:    mvn r0, #2
-; CHECK-NON-MVE-NEXT:    and.w r0, r0, r1, lsl #1
-; CHECK-NON-MVE-NEXT:    bx lr
+; CHECK-LABEL: lshr_demand_bottommask2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mvn r0, #2
+; CHECK-NEXT:    and.w r0, r0, r1, lsl #1
+; CHECK-NEXT:    bx lr
 entry:
   %shr = lshr i64 %x, 31
   %t = trunc i64 %shr to i32

