[llvm] 40c0836 - [DAGCombiner] Add command line options to guard store width reduction

Guozhi Wei via llvm-commits llvm-commits at lists.llvm.org
Fri May 29 09:42:14 PDT 2020


Author: Guozhi Wei
Date: 2020-05-29T09:41:41-07:00
New Revision: 40c08367e411a178404d4f01a82f651188f2ed01

URL: https://github.com/llvm/llvm-project/commit/40c08367e411a178404d4f01a82f651188f2ed01
DIFF: https://github.com/llvm/llvm-project/commit/40c08367e411a178404d4f01a82f651188f2ed01.diff

LOG: [DAGCombiner] Add command line options to guard store width reduction
optimizations

As discussed in the thread http://lists.llvm.org/pipermail/llvm-dev/2020-May/141838.html,
the width of some bit field accesses can be reduced by ReduceLoadOpStoreWidth,
while others cannot. If two such accesses are very close together and only the
first one has its width reduced, the wide load of the second access can be
stalled for a long time, because the narrowed store cannot be forwarded to it.
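
For illustration only, here is a minimal C sketch of this kind of pattern
(the struct, field, and function names are hypothetical, not taken from the
thread or the patch):

    struct bit_fields {
      unsigned f1 : 1;
      unsigned f2 : 31;
    };

    /* The read-modify-write of f1 touches only bit 0, so the combiner may
       shrink its load/and/store to a single byte. */
    void clear_f1(struct bit_fields *p) { p->f1 = 0; }

    /* Reading f2 needs the full 32-bit word; if this load executes shortly
       after the narrowed byte store above, it overlaps that store, cannot
       be store-forwarded, and may stall. */
    unsigned read_f2(struct bit_fields *p) { return p->f2; }

Calling clear_f1 and then read_f2 on the same object can put a narrow store
right in front of a wide load of the same word, which is the situation
described above.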

This patch adds command line options to guard ReduceLoadOpStoreWidth and
ShrinkLoadReplaceStoreWithStore, so users can disable these store width
reduction optimizations.
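
Note that these are hidden cl::opt flags, so they can be passed to llc
directly (as in the new tests below) or forwarded from a clang invocation
via -mllvm, e.g. -mllvm -combiner-reduce-load-op-store-width=false.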

Differential Revision: https://reviews.llvm.org/D80745

Added: 
    llvm/test/CodeGen/X86/clear-bitfield.ll
    llvm/test/CodeGen/X86/disable-shrink-store.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d54663f4ce78..43bcf2e11888 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -125,6 +125,16 @@ static cl::opt<unsigned> StoreMergeDependenceLimit(
     cl::desc("Limit the number of times for the same StoreNode and RootNode "
              "to bail out in store merging dependence check"));
 
+static cl::opt<bool> EnableReduceLoadOpStoreWidth(
+    "combiner-reduce-load-op-store-width", cl::Hidden, cl::init(true),
+    cl::desc("DAG cominber enable reducing the width of load/op/store "
+             "sequence"));
+
+static cl::opt<bool> EnableShrinkLoadReplaceStoreWithStore(
+    "combiner-shrink-load-replace-store-with-store", cl::Hidden, cl::init(true),
+    cl::desc("DAG cominber enable load/<replace bytes>/store with "
+             "a narrower store"));
+
 namespace {
 
   class DAGCombiner {
@@ -15423,7 +15433,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
   // Y is known to provide just those bytes.  If so, we try to replace the
   // load + replace + store sequence with a single (narrower) store, which makes
   // the load dead.
-  if (Opc == ISD::OR) {
+  if (Opc == ISD::OR && EnableShrinkLoadReplaceStoreWithStore) {
     std::pair<unsigned, unsigned> MaskedLoad;
     MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain);
     if (MaskedLoad.first)
@@ -15439,6 +15449,9 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
         return NewST;
   }
 
+  if (!EnableReduceLoadOpStoreWidth)
+    return SDValue();
+
   if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
       Value.getOperand(1).getOpcode() != ISD::Constant)
     return SDValue();

diff --git a/llvm/test/CodeGen/X86/clear-bitfield.ll b/llvm/test/CodeGen/X86/clear-bitfield.ll
new file mode 100644
index 000000000000..01c35becefab
--- /dev/null
+++ b/llvm/test/CodeGen/X86/clear-bitfield.ll
@@ -0,0 +1,30 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -combiner-reduce-load-op-store-width=false | FileCheck %s
+
+%struct.bit_fields = type { i32 }
+
+define void @clear_b1(%struct.bit_fields* %ptr) {
+; CHECK-LABEL: clear_b1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andl $-2, (%rdi)
+; CHECK-NEXT:    retq
+entry:
+  %0 = bitcast %struct.bit_fields* %ptr to i32*
+  %bf.load = load i32, i32* %0
+  %bf.clear = and i32 %bf.load, -2
+  store i32 %bf.clear, i32* %0
+  ret void
+}
+
+define void @clear16(%struct.bit_fields* %ptr) {
+; CHECK-LABEL: clear16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    andw $-2, (%rdi)
+; CHECK-NEXT:    retq
+entry:
+  %0 = bitcast %struct.bit_fields* %ptr to i16*
+  %bf.load = load i16, i16* %0
+  %bf.clear = and i16 %bf.load, -2
+  store i16 %bf.clear, i16* %0
+  ret void
+}

diff --git a/llvm/test/CodeGen/X86/disable-shrink-store.ll b/llvm/test/CodeGen/X86/disable-shrink-store.ll
new file mode 100644
index 000000000000..871fc47dbfdb
--- /dev/null
+++ b/llvm/test/CodeGen/X86/disable-shrink-store.ll
@@ -0,0 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-- -combiner-shrink-load-replace-store-with-store=false | FileCheck %s
+
+define void @shrink(i16* %ptr) {
+; CHECK-LABEL: shrink:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    movzbl (%rdi), %eax
+; CHECK-NEXT:    orl $25600, %eax # imm = 0x6400
+; CHECK-NEXT:    movw %ax, (%rdi)
+; CHECK-NEXT:    retq
+entry:
+  %val = load i16, i16* %ptr
+  %masked_val = and i16 %val, 255
+  %replaced_val = or i16 %masked_val, 25600
+  store i16 %replaced_val, i16* %ptr
+  ret void
+}
+

More information about the llvm-commits mailing list