[llvm] r273105 - [LoadCombine] Combine Loads formed from GEPs with negative indexes
David Majnemer via llvm-commits
llvm-commits at lists.llvm.org
Sat Jun 18 23:14:56 PDT 2016
Author: majnemer
Date: Sun Jun 19 01:14:56 2016
New Revision: 273105
URL: http://llvm.org/viewvc/llvm-project?rev=273105&view=rev
Log:
[LoadCombine] Combine Loads formed from GEPs with negative indexes
Change the underlying offset and comparisons to use int64_t instead of
uint64_t.
Patch by River Riddle!
Differential Revision: http://reviews.llvm.org/D21499
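[Editor's note: background for the change, as a minimal standalone C++ sketch,
not code from the patch. With the offset held in a uint64_t, the -4 byte
offset produced by a negative GEP index wraps to a huge unsigned value, so the
pass's adjacency comparison concluded the load lay far past the previous one
and gave up on combining it.]

    #include <cstdint>
    #include <cstdio>

    int main() {
      // The byte offset of `getelementptr i32, i32* %p, i64 -1` is -4.
      uint64_t UnsignedOff = (uint64_t)-4; // wraps to 0xFFFFFFFFFFFFFFFC
      int64_t SignedOff = -4;
      // Unsigned: -4 appears to lie beyond the end of a load at [0, 4).
      printf("unsigned -4 > 4: %d\n", (int)(UnsignedOff > 4)); // prints 1
      // Signed: the comparison behaves as intended.
      printf("signed   -4 > 4: %d\n", (int)(SignedOff > 4));   // prints 0
    }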
Added:
llvm/trunk/test/Transforms/LoadCombine/load-combine-negativegep.ll
Modified:
llvm/trunk/lib/Transforms/Scalar/LoadCombine.cpp
Modified: llvm/trunk/lib/Transforms/Scalar/LoadCombine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/LoadCombine.cpp?rev=273105&r1=273104&r2=273105&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoadCombine.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/LoadCombine.cpp Sun Jun 19 01:14:56 2016
@@ -40,7 +40,7 @@ STATISTIC(NumLoadsCombined, "Number of l
namespace {
struct PointerOffsetPair {
Value *Pointer;
- uint64_t Offset;
+ int64_t Offset;
};
struct LoadPOPPair {
@@ -102,7 +102,7 @@ PointerOffsetPair LoadCombine::getPointe
unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
APInt Offset(BitWidth, 0);
if (GEP->accumulateConstantOffset(DL, Offset))
- POP.Offset += Offset.getZExtValue();
+ POP.Offset += Offset.getSExtValue();
else
// Can't handle GEPs with variable indices.
return POP;
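[Editor's note: APInt exposes both views of the accumulated constant offset;
the hunk above switches which one is used. A minimal sketch, assuming LLVM's
ADT headers are available to compile against; it is not part of the patch.]

    #include "llvm/ADT/APInt.h"
    #include <cstdio>

    int main() {
      // accumulateConstantOffset() leaves -4 in a pointer-width APInt.
      llvm::APInt Offset(/*numBits=*/64, /*val=*/(uint64_t)-4);
      // getZExtValue() hands back the raw bits as an unsigned value:
      // 18446744073709551612.
      printf("zext: %llu\n", (unsigned long long)Offset.getZExtValue());
      // getSExtValue() interprets the same bits as signed: -4.
      printf("sext: %lld\n", (long long)Offset.getSExtValue());
    }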
@@ -138,28 +138,31 @@ bool LoadCombine::aggregateLoads(SmallVe
LoadInst *BaseLoad = nullptr;
SmallVector<LoadPOPPair, 8> AggregateLoads;
bool Combined = false;
- uint64_t PrevOffset = -1ull;
+ bool ValidPrevOffset = false;
+ int64_t PrevOffset = 0;
uint64_t PrevSize = 0;
for (auto &L : Loads) {
- if (PrevOffset == -1ull) {
+ if (ValidPrevOffset == false) {
BaseLoad = L.Load;
PrevOffset = L.POP.Offset;
PrevSize = L.Load->getModule()->getDataLayout().getTypeStoreSize(
L.Load->getType());
AggregateLoads.push_back(L);
+ ValidPrevOffset = true;
continue;
}
if (L.Load->getAlignment() > BaseLoad->getAlignment())
continue;
- if (L.POP.Offset > PrevOffset + PrevSize) {
+ int64_t PrevEnd = PrevOffset + PrevSize;
+ if (L.POP.Offset > PrevEnd) {
// No other load will be combinable
if (combineLoads(AggregateLoads))
Combined = true;
AggregateLoads.clear();
- PrevOffset = -1;
+ ValidPrevOffset = false;
continue;
}
- if (L.POP.Offset != PrevOffset + PrevSize)
+ if (L.POP.Offset != PrevEnd)
// This load is offset less than the size of the last load.
// FIXME: We may want to handle this case.
continue;
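[Editor's note: the hunk above also replaces the old -1ull sentinel with an
explicit ValidPrevOffset flag. Once offsets are signed, -1 is a legitimate
byte offset (e.g. an i8 load one byte below the base), so it can no longer
double as the "no previous load" marker. Below is a stand-in sketch of the
fixed bookkeeping; variable names mirror the patch, but the LoadDesc type and
the driver are hypothetical.]

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct LoadDesc { int64_t Offset; uint64_t Size; };

    int main() {
      // The test case below: an i32 load at byte offset -4, then the base load.
      std::vector<LoadDesc> Loads = {{-4, 4}, {0, 4}};
      bool ValidPrevOffset = false;
      int64_t PrevOffset = 0;
      uint64_t PrevSize = 0;
      for (const LoadDesc &L : Loads) {
        if (!ValidPrevOffset) {
          // First load seen: start a new run.
          PrevOffset = L.Offset;
          PrevSize = L.Size;
          ValidPrevOffset = true;
          continue;
        }
        int64_t PrevEnd = PrevOffset + (int64_t)PrevSize;
        // -4 + 4 == 0, so the base load directly extends the run: combinable.
        printf("offset %lld %s the run ending at %lld\n", (long long)L.Offset,
               L.Offset == PrevEnd ? "extends" : "does not extend",
               (long long)PrevEnd);
        PrevOffset = L.Offset;
        PrevSize = L.Size;
      }
    }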
Added: llvm/trunk/test/Transforms/LoadCombine/load-combine-negativegep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadCombine/load-combine-negativegep.ll?rev=273105&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoadCombine/load-combine-negativegep.ll (added)
+++ llvm/trunk/test/Transforms/LoadCombine/load-combine-negativegep.ll Sun Jun 19 01:14:56 2016
@@ -0,0 +1,19 @@
+; RUN: opt -basicaa -load-combine -S < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @Load_NegGep(i32* %i){
+ %1 = getelementptr inbounds i32, i32* %i, i64 -1
+ %2 = load i32, i32* %1, align 4
+ %3 = load i32, i32* %i, align 4
+ %4 = add nsw i32 %3, %2
+ ret i32 %4
+; CHECK-LABEL: @Load_NegGep(
+; CHECK: %[[load:.*]] = load i64
+; CHECK: %[[combine_extract_lo:.*]] = trunc i64 %[[load]] to i32
+; CHECK: %[[combine_extract_shift:.*]] = lshr i64 %[[load]], 32
+; CHECK: %[[combine_extract_hi:.*]] = trunc i64 %[[combine_extract_shift]] to i32
+; CHECK: %[[add:.*]] = add nsw i32 %[[combine_extract_hi]], %[[combine_extract_lo]]
+}
+
+