[llvm] r236695 - Update InstCombine to transform aggregate loads into scalar loads.
Mehdi Amini
mehdi.amini at apple.com
Wed May 6 22:52:41 PDT 2015
Author: mehdi_amini
Date: Thu May 7 00:52:40 2015
New Revision: 236695
URL: http://llvm.org/viewvc/llvm-project?rev=236695&view=rev
Log:
Update InstCombine to transform aggregate loads into scalar loads.
Summary:
One step further toward getting aggregate loads and stores optimized
properly. At this point only structs with a single element are handled.
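To illustrate (a minimal sketch, not taken from the patch; %S, %p and the
value names are made up), a load of a single-element struct such as
%S = type { i32 } is rewritten roughly as follows:

  ; before
  %v = load %S, %S* %p, align 8

  ; after (conceptually): load the lone element and re-wrap it
  %cast = bitcast %S* %p to i32*
  %v.unpack = load i32, i32* %cast, align 8
  %v = insertvalue %S undef, i32 %v.unpack, 0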
Test Plan: Added unit tests for the newly supported cases.
Reviewers: chandlerc, joker-eph, joker.eph, majnemer
Reviewed By: majnemer
Subscribers: pete, llvm-commits
Differential Revision: http://reviews.llvm.org/D8339
Patch by Amaury Sechet.
From: Amaury Sechet <amaury at fb.com>
Modified:
llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
llvm/trunk/test/Transforms/InstCombine/unpack-fca.ll
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp?rev=236695&r1=236694&r2=236695&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp Thu May 7 00:52:40 2015
@@ -314,7 +314,8 @@ Instruction *InstCombiner::visitAllocaIn
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
-static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy) {
+static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
+ const Twine &Suffix = "") {
Value *Ptr = LI.getPointerOperand();
unsigned AS = LI.getPointerAddressSpace();
SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
@@ -322,7 +323,7 @@ static LoadInst *combineLoadToNewType(In
LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
- LI.getAlignment(), LI.getName());
+ LI.getAlignment(), LI.getName() + Suffix);
MDBuilder MDB(NewLoad->getContext());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
@@ -495,6 +496,31 @@ static Instruction *combineLoadToOperati
return nullptr;
}
+static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
+ // FIXME: We could probably handle both volatile and atomic loads here with
+ // some care, but it isn't clear that this is important.
+ if (!LI.isSimple())
+ return nullptr;
+
+ Type *T = LI.getType();
+ if (!T->isAggregateType())
+ return nullptr;
+
+ assert(LI.getAlignment() && "Alignment must be set at this point");
+
+ if (auto *ST = dyn_cast<StructType>(T)) {
+ // If the struct only has one element, we unpack.
+ if (ST->getNumElements() == 1) {
+ LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
+ ".unpack");
+ return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
+ UndefValue::get(T), NewLoad, 0, LI.getName()));
+ }
+ }
+
+ return nullptr;
+}
+
// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
@@ -701,6 +727,9 @@ Instruction *InstCombiner::visitLoadInst
// FIXME: Some of it is okay for atomic loads; needs refactoring.
if (!LI.isSimple()) return nullptr;
+ if (Instruction *Res = unpackLoadToAggregate(*this, LI))
+ return Res;
+
// Do really simple store-to-load forwarding and load CSE, to catch cases
// where there are several consecutive memory accesses to the same location,
// separated by a few arithmetic operations.
@@ -832,7 +861,7 @@ static bool unpackStoreToAggregate(InstC
if (!T->isAggregateType())
return false;
- if (StructType *ST = dyn_cast<StructType>(T)) {
+ if (auto *ST = dyn_cast<StructType>(T)) {
 // If the struct only has one element, we unpack.
if (ST->getNumElements() == 1) {
V = IC.Builder->CreateExtractValue(V, 0);
Modified: llvm/trunk/test/Transforms/InstCombine/unpack-fca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/unpack-fca.ll?rev=236695&r1=236694&r2=236695&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/unpack-fca.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/unpack-fca.ll Thu May 7 00:52:40 2015
@@ -12,20 +12,58 @@ declare i32 @A.foo(%A* nocapture %this)
declare i8* @allocmemory(i64)
-define void @structA() {
+define void @storeA() {
body:
%0 = tail call i8* @allocmemory(i64 32)
%1 = bitcast i8* %0 to %A*
+; CHECK-LABEL: storeA
; CHECK: store %A__vtbl* @A__vtblZ
store %A { %A__vtbl* @A__vtblZ }, %A* %1, align 8
ret void
}
-define void @structOfA() {
+define void @storeStructOfA() {
body:
%0 = tail call i8* @allocmemory(i64 32)
%1 = bitcast i8* %0 to { %A }*
+; CHECK-LABEL: storeStructOfA
; CHECK: store %A__vtbl* @A__vtblZ
store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %1, align 8
ret void
}
+
+define %A @loadA() {
+body:
+ %0 = tail call i8* @allocmemory(i64 32)
+ %1 = bitcast i8* %0 to %A*
+; CHECK-LABEL: loadA
+; CHECK: load %A__vtbl*,
+; CHECK: insertvalue %A undef, %A__vtbl* {{.*}}, 0
+ %2 = load %A, %A* %1, align 8
+ ret %A %2
+}
+
+define { %A } @loadStructOfA() {
+body:
+ %0 = tail call i8* @allocmemory(i64 32)
+ %1 = bitcast i8* %0 to { %A }*
+; CHECK-LABEL: loadStructOfA
+; CHECK: load %A__vtbl*,
+; CHECK: insertvalue %A undef, %A__vtbl* {{.*}}, 0
+; CHECK: insertvalue { %A } undef, %A {{.*}}, 0
+ %2 = load { %A }, { %A }* %1, align 8
+ ret { %A } %2
+}
+
+define { %A } @structOfA() {
+body:
+ %0 = tail call i8* @allocmemory(i64 32)
+ %1 = bitcast i8* %0 to { %A }*
+; CHECK-LABEL: structOfA
+; CHECK: store %A__vtbl* @A__vtblZ
+ store { %A } { %A { %A__vtbl* @A__vtblZ } }, { %A }* %1, align 8
+ %2 = load { %A }, { %A }* %1, align 8
+; CHECK-NOT: load
+; CHECK: ret { %A } { %A { %A__vtbl* @A__vtblZ } }
+ ret { %A } %2
+}
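For reference, the IR one might expect for @loadA after this patch looks
roughly like the following (a sketch only; exact value names and how the
bitcasts get folded may differ):

  define %A @loadA() {
  body:
    %0 = tail call i8* @allocmemory(i64 32)
    ; the aggregate load is replaced by a load of the single field ...
    %1 = bitcast i8* %0 to %A__vtbl**
    %2 = load %A__vtbl*, %A__vtbl** %1, align 8
    ; ... which is then re-wrapped into the original struct type
    %3 = insertvalue %A undef, %A__vtbl* %2, 0
    ret %A %3
  }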