[llvm-commits] [llvm] r111665 - in /llvm/trunk: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll

Owen Anderson resistor at mac.com
Fri Aug 20 11:24:43 PDT 2010


Author: resistor
Date: Fri Aug 20 13:24:43 2010
New Revision: 111665

URL: http://llvm.org/viewvc/llvm-project?rev=111665&view=rev
Log:
Re-apply r111568 with a fix for the clang self-host.
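
For context, the new transform targets the load/mask/store sequences that clang
emits for C++ bitfield member initializers.  A hypothetical source-level
example (not part of this commit) that produces such a sequence:

  // Hypothetical illustration only.  Each member initializer becomes a load
  // of the word holding the bitfields, an and/or that modifies only the low
  // byte, and a store of the whole word back -- the stores this patch narrows.
  class A {
    unsigned a : 3;
    unsigned b : 3;
    unsigned c : 1;
    unsigned d : 1;
  public:
    A() : a(0), b(1), c(0), d(0) {}
  };

(The added test below captures IR of this shape; _ZN1AC2Ev demangles to
A::A().)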

Added:
    llvm/trunk/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll
Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp?rev=111665&r1=111664&r2=111665&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp Fri Aug 20 13:24:43 2010
@@ -14,11 +14,13 @@
 #include "InstCombine.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Analysis/Loads.h"
+#include "llvm/Support/PatternMatch.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/ADT/Statistic.h"
 using namespace llvm;
+using namespace PatternMatch;
 
 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
 
@@ -473,6 +475,51 @@
   
   if (SI.isVolatile()) return 0;  // Don't hack volatile stores.
 
+  // Attempt to narrow sequences where we load a wide value, apply bitmasks
+  // that affect only its low bits, and then store it back.  This typically
+  // arises from bitfield initializers in C++.
+  ConstantInt *CI1 = 0, *CI2 = 0;
+  Value *Ld = 0;
+  if (getTargetData() &&
+      match(SI.getValueOperand(),
+            m_And(m_Or(m_Value(Ld), m_ConstantInt(CI1)), m_ConstantInt(CI2))) &&
+      isa<LoadInst>(Ld) &&
+      equivalentAddressValues(cast<LoadInst>(Ld)->getPointerOperand(), Ptr)) {
+    APInt OrMask = CI1->getValue();
+    APInt AndMask = CI2->getValue();
+    
+    // Compute the prefix of the value that is unmodified by the bitmasking.
+    unsigned LeadingAndOnes = AndMask.countLeadingOnes();
+    unsigned LeadingOrZeros = OrMask.countLeadingZeros();
+    unsigned Prefix = std::min(LeadingAndOnes, LeadingOrZeros);
+    uint64_t NewWidth = AndMask.getBitWidth() - Prefix;
+    while (NewWidth < AndMask.getBitWidth() &&
+           getTargetData()->isIllegalInteger(NewWidth))
+      NewWidth = NextPowerOf2(NewWidth);
+    
+    // If the rounded-up width is legal for the target and strictly narrower
+    // than the original store, we can narrow the store.  We rely on later
+    // iterations of instcombine to propagate the demanded bits to narrow the
+    // other computations in the chain.
+    if (NewWidth < AndMask.getBitWidth() &&
+        getTargetData()->isLegalInteger(NewWidth)) {
+      const Type *NewType = IntegerType::get(Ptr->getContext(), NewWidth);
+      const Type *NewPtrType = PointerType::getUnqual(NewType);
+      
+      Value *NewVal = Builder->CreateTrunc(SI.getValueOperand(), NewType);
+      Value *NewPtr = Builder->CreateBitCast(Ptr, NewPtrType);
+      
+      // On big endian targets, we need to offset from the original pointer
+      // in order to store to the low-bit suffix.
+      if (getTargetData()->isBigEndian()) {
+        uint64_t GEPOffset = (AndMask.getBitWidth() - NewWidth) / 8;
+        NewPtr = Builder->CreateConstGEP1_64(NewPtr, GEPOffset);
+      }
+      
+      return new StoreInst(NewVal, NewPtr);
+    }
+  }
+
   // store X, null    -> turns into 'unreachable' in SimplifyCFG
   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
     if (!isa<UndefValue>(Val)) {
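
As a rough illustration of the width computation in the hunk above (a
standalone sketch, not code from the commit; it uses plain uint32_t and a
compiler builtin in place of APInt, and assumes x86-64-style legal integer
widths):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>

  // Count leading zeros of a 32-bit value, defined for 0 as well.
  static unsigned clz32(uint32_t X) { return X ? __builtin_clz(X) : 32; }

  // Assumed legal integer widths, per the test's datalayout "n8:16:32:64".
  static bool isLegalWidth(unsigned W) {
    return W == 8 || W == 16 || W == 32 || W == 64;
  }

  // Smallest power of two strictly greater than W (mirrors NextPowerOf2).
  static unsigned nextPow2(unsigned W) {
    unsigned P = 1;
    while (P <= W) P <<= 1;
    return P;
  }

  // Width, in bits, of the narrowed store for the pattern
  //   store ((load p) | OrMask) & AndMask, p
  // following the same prefix computation as the patch.
  static unsigned narrowedStoreWidth(uint32_t OrMask, uint32_t AndMask) {
    unsigned LeadingAndOnes = clz32(~AndMask); // high bits the 'and' keeps
    unsigned LeadingOrZeros = clz32(OrMask);   // high bits the 'or' keeps
    unsigned Prefix = std::min(LeadingAndOnes, LeadingOrZeros);
    unsigned NewWidth = 32 - Prefix;           // only the low bits change
    while (NewWidth < 32 && !isLegalWidth(NewWidth))
      NewWidth = nextPow2(NewWidth);
    return NewWidth;
  }

  int main() {
    // Masks in the spirit of the test below: an or-mask of 8 and an and-mask
    // of -57 modify only the low 6 bits of the loaded i32, so the store can
    // be narrowed to an i8.  On a big-endian target the narrowed store would
    // also be offset by (32 - 8) / 8 = 3 bytes to reach the low-order bits.
    assert(narrowedStoreWidth(8u, (uint32_t)-57) == 8);
    return 0;
  }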

Added: llvm/trunk/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll?rev=111665&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll Fri Aug 20 13:24:43 2010
@@ -0,0 +1,21 @@
+; RUN: opt -S -instcombine %s | not grep and
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-darwin10.0.0"
+
+%class.A = type { i8, [3 x i8] }
+
+define void @_ZN1AC2Ev(%class.A* %this) nounwind ssp align 2 {
+entry:
+  %0 = bitcast %class.A* %this to i32*            ; <i32*> [#uses=5]
+  %1 = load i32* %0, align 4                      ; <i32> [#uses=1]
+  %2 = and i32 %1, -8                             ; <i32> [#uses=2]
+  store i32 %2, i32* %0, align 4
+  %3 = and i32 %2, -57                            ; <i32> [#uses=1]
+  %4 = or i32 %3, 8                               ; <i32> [#uses=2]
+  store i32 %4, i32* %0, align 4
+  %5 = and i32 %4, -65                            ; <i32> [#uses=2]
+  store i32 %5, i32* %0, align 4
+  %6 = and i32 %5, -129                           ; <i32> [#uses=1]
+  store i32 %6, i32* %0, align 4
+  ret void
+}




