[llvm] 958abe8 - [LoopLoadElim] Add stores with matching sizes as load-store candidates

Jolanta Jensen via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 2 05:12:10 PDT 2022


Author: Jolanta Jensen
Date: 2022-09-02T13:11:25+01:00
New Revision: 958abe864ab777302d1e7aee7b6c5ea4b0fe9be6

URL: https://github.com/llvm/llvm-project/commit/958abe864ab777302d1e7aee7b6c5ea4b0fe9be6
DIFF: https://github.com/llvm/llvm-project/commit/958abe864ab777302d1e7aee7b6c5ea4b0fe9be6.diff

LOG: [LoopLoadElim] Add stores with matching sizes as load-store candidates

We are not building up a proper list of load-store candidates because
we are throwing away stores where the type doesn't match the load.
This patch adds stores with matching store sizes as candidates.
Author of the original patch: David Sherwood.

Differential Revision: https://reviews.llvm.org/D130233

Added: 
    

Modified: 
    llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
    llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
    llvm/test/Transforms/LoopLoadElim/type-mismatch.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index 1877ac1dfd083..13049c701e68a 100644
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -98,10 +98,12 @@ struct StoreToLoadForwardingCandidate {
     Value *LoadPtr = Load->getPointerOperand();
     Value *StorePtr = Store->getPointerOperand();
     Type *LoadType = getLoadStoreType(Load);
+    auto &DL = Load->getParent()->getModule()->getDataLayout();
 
     assert(LoadPtr->getType()->getPointerAddressSpace() ==
                StorePtr->getType()->getPointerAddressSpace() &&
-           LoadType == getLoadStoreType(Store) &&
+           DL.getTypeSizeInBits(LoadType) ==
+               DL.getTypeSizeInBits(getLoadStoreType(Store)) &&
            "Should be a known dependence");
 
     // Currently we only support accesses with unit stride.  FIXME: we should be
@@ -111,7 +113,6 @@ struct StoreToLoadForwardingCandidate {
         getPtrStride(PSE, LoadType, StorePtr, L) != 1)
       return false;
 
-    auto &DL = Load->getParent()->getModule()->getDataLayout();
     unsigned TypeByteSize = DL.getTypeAllocSize(const_cast<Type *>(LoadType));
 
     auto *LoadPtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(LoadPtr));
@@ -211,9 +212,10 @@ class LoadEliminationForLoop {
       if (!Load)
         continue;
 
-      // Only progagate the value if they are of the same type.
-      if (Store->getPointerOperandType() != Load->getPointerOperandType() ||
-          getLoadStoreType(Store) != getLoadStoreType(Load))
+      // Only propagate if the stored values are bit/pointer castable.
+      if (!CastInst::isBitOrNoopPointerCastable(
+              getLoadStoreType(Store), getLoadStoreType(Load),
+              Store->getParent()->getModule()->getDataLayout()))
         continue;
 
       Candidates.emplace_front(Load, Store);
@@ -438,7 +440,21 @@ class LoadEliminationForLoop {
     PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
                                    &L->getHeader()->front());
     PHI->addIncoming(Initial, PH);
-    PHI->addIncoming(Cand.Store->getOperand(0), L->getLoopLatch());
+
+    Type *LoadType = Initial->getType();
+    Type *StoreType = Cand.Store->getValueOperand()->getType();
+    auto &DL = Cand.Load->getParent()->getModule()->getDataLayout();
+    (void)DL;
+
+    assert(DL.getTypeSizeInBits(LoadType) == DL.getTypeSizeInBits(StoreType) &&
+           "The type sizes should match!");
+
+    Value *StoreValue = Cand.Store->getValueOperand();
+    if (LoadType != StoreType)
+      StoreValue = CastInst::CreateBitOrPointerCast(
+          StoreValue, LoadType, "store_forward_cast", Cand.Store);
+
+    PHI->addIncoming(StoreValue, L->getLoopLatch());
 
     Cand.Load->replaceAllUsesWith(PHI);
   }

diff --git a/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll b/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
index b1fb13f2f9f60..ff799eea1b54e 100644
--- a/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
+++ b/llvm/test/Transforms/LoopLoadElim/type-mismatch-opaque-ptr.ll
@@ -1,31 +1,35 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt --opaque-pointers -loop-load-elim -S < %s | FileCheck %s
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types, but have the same
+; size then we should still be able to forward the value.
 ;
 ;   for (unsigned i = 0; i < 100; i++) {
 ;     A[i+1] = B[i] + 2;
 ;     C[i] = ((float*)A)[i] * 2;
 ;   }
 
-target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target datalayout = "e-m:o-p64:64:64-i64:64-f80:128-n8:16:32:64-S128"
 
 define void @f(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
 ; CHECK-LABEL: @f(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load float, ptr [[A:%.*]], align 4
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
 ; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[B:%.*]] = load i32, ptr [[BIDX]], align 4
 ; CHECK-NEXT:    [[A_P1:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P1]] to float
 ; CHECK-NEXT:    store i32 [[A_P1]], ptr [[AIDX_NEXT]], align 4
 ; CHECK-NEXT:    [[A:%.*]] = load float, ptr [[AIDX]], align 4
-; CHECK-NEXT:    [[C:%.*]] = fmul float [[A]], 2.000000e+00
+; CHECK-NEXT:    [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
 ; CHECK-NEXT:    [[C_INT:%.*]] = fptosi float [[C]] to i32
 ; CHECK-NEXT:    store i32 [[C_INT]], ptr [[CIDX]], align 4
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
@@ -61,7 +65,8 @@ for.end:                                          ; preds = %for.body
   ret void
 }
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types, but have the same
+; size then we should still be able to forward the value.
 ;
 ;   for (unsigned i = 0; i < 100; i++) {
 ;     A[i+1] = B[i] + 2;
@@ -72,11 +77,13 @@ for.end:                                          ; preds = %for.body
 define void @f2(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
 ; CHECK-LABEL: @f2(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load float, ptr [[A:%.*]], align 4
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
 ; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
@@ -84,9 +91,10 @@ define void @f2(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
 ; CHECK-NEXT:    [[A_P2:%.*]] = add i32 [[B]], 2
 ; CHECK-NEXT:    store i32 [[A_P2]], ptr [[AIDX_NEXT]], align 4
 ; CHECK-NEXT:    [[A_P3:%.*]] = add i32 [[B]], 3
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P3]] to float
 ; CHECK-NEXT:    store i32 [[A_P3]], ptr [[AIDX_NEXT]], align 4
 ; CHECK-NEXT:    [[A:%.*]] = load float, ptr [[AIDX]], align 4
-; CHECK-NEXT:    [[C:%.*]] = fmul float [[A]], 2.000000e+00
+; CHECK-NEXT:    [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
 ; CHECK-NEXT:    [[C_INT:%.*]] = fptosi float [[C]] to i32
 ; CHECK-NEXT:    store i32 [[C_INT]], ptr [[CIDX]], align 4
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
@@ -125,24 +133,28 @@ for.end:                                          ; preds = %for.body
   ret void
 }
 
-; Don't crash if the store and the load use different types.
+; Check that we can forward between pointer-sized integers and actual
+; pointers.
 
 define void @f3(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
 ; CHECK-LABEL: @f3(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load ptr, ptr [[A:%.*]], align 8
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi ptr [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
 ; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i64, ptr [[C:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[B:%.*]] = load i64, ptr [[BIDX]], align 8
 ; CHECK-NEXT:    [[A_P1:%.*]] = add i64 [[B]], 2
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = inttoptr i64 [[A_P1]] to ptr
 ; CHECK-NEXT:    store i64 [[A_P1]], ptr [[AIDX_NEXT]], align 8
 ; CHECK-NEXT:    [[A:%.*]] = load ptr, ptr [[AIDX]], align 8
-; CHECK-NEXT:    [[C:%.*]] = getelementptr i8, ptr [[A]], i64 57
+; CHECK-NEXT:    [[C:%.*]] = getelementptr i8, ptr [[STORE_FORWARDED]], i64 57
 ; CHECK-NEXT:    [[C_I64P:%.*]] = ptrtoint ptr [[C]] to i64
 ; CHECK-NEXT:    store i64 [[C_I64P]], ptr [[CIDX]], align 8
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
@@ -178,7 +190,9 @@ for.end:                                          ; preds = %for.body
   ret void
 }
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types, but have the same
+; size then we should still be able to forward the value--also for
+; vector types.
 ;
 ;   for (unsigned i = 0; i < 100; i++) {
 ;     A[i+1] = B[i] + 2;
@@ -188,19 +202,22 @@ for.end:                                          ; preds = %for.body
 define void @f4(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
 ; CHECK-LABEL: @f4(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load <2 x half>, ptr [[A:%.*]], align 4
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi <2 x half> [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV_NEXT]]
 ; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[B:%.*]] = load i32, ptr [[BIDX]], align 4
 ; CHECK-NEXT:    [[A_P1:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P1]] to <2 x half>
 ; CHECK-NEXT:    store i32 [[A_P1]], ptr [[AIDX_NEXT]], align 4
 ; CHECK-NEXT:    [[A:%.*]] = load <2 x half>, ptr [[AIDX]], align 4
-; CHECK-NEXT:    [[C:%.*]] = fmul <2 x half> [[A]], <half 0xH4000, half 0xH4000>
+; CHECK-NEXT:    [[C:%.*]] = fmul <2 x half> [[STORE_FORWARDED]], <half 0xH4000, half 0xH4000>
 ; CHECK-NEXT:    [[C_INT:%.*]] = bitcast <2 x half> [[C]] to i32
 ; CHECK-NEXT:    store i32 [[C_INT]], ptr [[CIDX]], align 4
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
@@ -235,3 +252,57 @@ for.body:                                         ; preds = %for.body, %entry
 for.end:                                          ; preds = %for.body
   ret void
 }
+
+; Check that we don't forward between integers and actual
+; pointers if sizes don't match.
+
+define void @f5(ptr noalias %A, ptr noalias %B, ptr noalias %C, i64 %N) {
+; CHECK-LABEL: @f5(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[B:%.*]] = load i32, ptr [[BIDX]], align 4
+; CHECK-NEXT:    [[A_P1:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    store i32 [[A_P1]], ptr [[AIDX_NEXT]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load ptr, ptr [[AIDX]], align 8
+; CHECK-NEXT:    [[C:%.*]] = getelementptr i8, ptr [[A]], i32 57
+; CHECK-NEXT:    [[C_I64P:%.*]] = ptrtoint ptr [[C]] to i32
+; CHECK-NEXT:    store i32 [[C_I64P]], ptr [[CIDX]], align 8
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+
+  %Aidx_next = getelementptr inbounds i32, ptr %A, i64 %indvars.iv.next
+  %Bidx = getelementptr inbounds i32, ptr %B, i64 %indvars.iv
+  %Cidx = getelementptr inbounds i32, ptr %C, i64 %indvars.iv
+  %Aidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+
+  %b = load i32, ptr %Bidx, align 4
+  %a_p1 = add i32 %b, 2
+  store i32 %a_p1, ptr %Aidx_next, align 4
+
+  %a = load ptr, ptr %Aidx , align 8
+  %c = getelementptr i8, ptr %a, i32 57
+  %c.i64p = ptrtoint i8* %c to i32
+  store i32 %c.i64p, ptr %Cidx, align 8
+
+  %exitcond = icmp eq i64 %indvars.iv.next, %N
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}

diff --git a/llvm/test/Transforms/LoopLoadElim/type-mismatch.ll b/llvm/test/Transforms/LoopLoadElim/type-mismatch.ll
index 1702eac4da174..1fceb79b20c07 100644
--- a/llvm/test/Transforms/LoopLoadElim/type-mismatch.ll
+++ b/llvm/test/Transforms/LoopLoadElim/type-mismatch.ll
@@ -1,32 +1,37 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -loop-load-elim -S < %s | FileCheck %s
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types, but have the same
+; size then we should still be able to forward the value.
 ;
 ;   for (unsigned i = 0; i < 100; i++) {
 ;     A[i+1] = B[i] + 2;
 ;     C[i] = ((float*)A)[i] * 2;
 ;   }
 
-target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target datalayout = "e-m:o-p64:64:64-i64:64-f80:128-n8:16:32:64-S128"
 
 define void @f(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
 ; CHECK-LABEL: @f(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A1:%.*]] = bitcast i32* [[A:%.*]] to float*
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load float, float* [[A1]], align 4
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
 ; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX_FLOAT:%.*]] = bitcast i32* [[AIDX]] to float*
 ; CHECK-NEXT:    [[B:%.*]] = load i32, i32* [[BIDX]], align 4
 ; CHECK-NEXT:    [[A_P1:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P1]] to float
 ; CHECK-NEXT:    store i32 [[A_P1]], i32* [[AIDX_NEXT]], align 4
 ; CHECK-NEXT:    [[A:%.*]] = load float, float* [[AIDX_FLOAT]], align 4
-; CHECK-NEXT:    [[C:%.*]] = fmul float [[A]], 2.000000e+00
+; CHECK-NEXT:    [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
 ; CHECK-NEXT:    [[C_INT:%.*]] = fptosi float [[C]] to i32
 ; CHECK-NEXT:    store i32 [[C_INT]], i32* [[CIDX]], align 4
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
@@ -63,7 +68,8 @@ for.end:                                          ; preds = %for.body
   ret void
 }
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types, but have the same
+; size then we should still be able to forward the value.
 ;
 ;   for (unsigned i = 0; i < 100; i++) {
 ;     A[i+1] = B[i] + 2;
@@ -74,11 +80,14 @@ for.end:                                          ; preds = %for.body
 define void @f2(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
 ; CHECK-LABEL: @f2(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A1:%.*]] = bitcast i32* [[A:%.*]] to float*
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load float, float* [[A1]], align 4
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi float [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
 ; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
@@ -87,9 +96,10 @@ define void @f2(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
 ; CHECK-NEXT:    [[A_P2:%.*]] = add i32 [[B]], 2
 ; CHECK-NEXT:    store i32 [[A_P2]], i32* [[AIDX_NEXT]], align 4
 ; CHECK-NEXT:    [[A_P3:%.*]] = add i32 [[B]], 3
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P3]] to float
 ; CHECK-NEXT:    store i32 [[A_P3]], i32* [[AIDX_NEXT]], align 4
 ; CHECK-NEXT:    [[A:%.*]] = load float, float* [[AIDX_FLOAT]], align 4
-; CHECK-NEXT:    [[C:%.*]] = fmul float [[A]], 2.000000e+00
+; CHECK-NEXT:    [[C:%.*]] = fmul float [[STORE_FORWARDED]], 2.000000e+00
 ; CHECK-NEXT:    [[C_INT:%.*]] = fptosi float [[C]] to i32
 ; CHECK-NEXT:    store i32 [[C_INT]], i32* [[CIDX]], align 4
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
@@ -129,25 +139,30 @@ for.end:                                          ; preds = %for.body
   ret void
 }
 
-; Don't crash if the store and the load use different types.
+; Check that we can forward between pointer-sized integers and actual
+; pointers.
 
 define void @f3(i64* noalias %A, i64* noalias %B, i64* noalias %C, i64 %N) {
 ; CHECK-LABEL: @f3(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A1:%.*]] = bitcast i64* [[A:%.*]] to i8**
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load i8*, i8** [[A1]], align 8
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi i8* [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV_NEXT]]
 ; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i64, i64* [[C:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX_I8P:%.*]] = bitcast i64* [[AIDX]] to i8**
 ; CHECK-NEXT:    [[B:%.*]] = load i64, i64* [[BIDX]], align 8
 ; CHECK-NEXT:    [[A_P1:%.*]] = add i64 [[B]], 2
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = inttoptr i64 [[A_P1]] to i8*
 ; CHECK-NEXT:    store i64 [[A_P1]], i64* [[AIDX_NEXT]], align 8
 ; CHECK-NEXT:    [[A:%.*]] = load i8*, i8** [[AIDX_I8P]], align 8
-; CHECK-NEXT:    [[C:%.*]] = getelementptr i8, i8* [[A]], i64 57
+; CHECK-NEXT:    [[C:%.*]] = getelementptr i8, i8* [[STORE_FORWARDED]], i64 57
 ; CHECK-NEXT:    [[C_I64P:%.*]] = ptrtoint i8* [[C]] to i64
 ; CHECK-NEXT:    store i64 [[C_I64P]], i64* [[CIDX]], align 8
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
@@ -184,7 +199,9 @@ for.end:                                          ; preds = %for.body
   ret void
 }
 
-; Don't crash if the store and the load use different types.
+; If the store and the load use different types, but have the same
+; size then we should still be able to forward the value--also for
+; vector types.
 ;
 ;   for (unsigned i = 0; i < 100; i++) {
 ;     A[i+1] = B[i] + 2;
@@ -194,20 +211,24 @@ for.end:                                          ; preds = %for.body
 define void @f4(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
 ; CHECK-LABEL: @f4(
 ; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A1:%.*]] = bitcast i32* [[A:%.*]] to <2 x half>*
+; CHECK-NEXT:    [[LOAD_INITIAL:%.*]] = load <2 x half>, <2 x half>* [[A1]], align 4
 ; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[STORE_FORWARDED:%.*]] = phi <2 x half> [ [[LOAD_INITIAL]], [[ENTRY:%.*]] ], [ [[STORE_FORWARD_CAST:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
 ; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[AIDX_FLOAT:%.*]] = bitcast i32* [[AIDX]] to <2 x half>*
 ; CHECK-NEXT:    [[B:%.*]] = load i32, i32* [[BIDX]], align 4
 ; CHECK-NEXT:    [[A_P1:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    [[STORE_FORWARD_CAST]] = bitcast i32 [[A_P1]] to <2 x half>
 ; CHECK-NEXT:    store i32 [[A_P1]], i32* [[AIDX_NEXT]], align 4
 ; CHECK-NEXT:    [[A:%.*]] = load <2 x half>, <2 x half>* [[AIDX_FLOAT]], align 4
-; CHECK-NEXT:    [[C:%.*]] = fmul <2 x half> [[A]], <half 0xH4000, half 0xH4000>
+; CHECK-NEXT:    [[C:%.*]] = fmul <2 x half> [[STORE_FORWARDED]], <half 0xH4000, half 0xH4000>
 ; CHECK-NEXT:    [[C_INT:%.*]] = bitcast <2 x half> [[C]] to i32
 ; CHECK-NEXT:    store i32 [[C_INT]], i32* [[CIDX]], align 4
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
@@ -243,3 +264,59 @@ for.body:                                         ; preds = %for.body, %entry
 for.end:                                          ; preds = %for.body
   ret void
 }
+
+; Check that we don't forward between integers and actual
+; pointers if sizes don't match.
+
+define void @f5(i32* noalias %A, i32* noalias %B, i32* noalias %C, i64 %N) {
+; CHECK-LABEL: @f5(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[AIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV_NEXT]]
+; CHECK-NEXT:    [[BIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[CIDX:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[AIDX_I8P:%.*]] = bitcast i32* [[AIDX]] to i8**
+; CHECK-NEXT:    [[B:%.*]] = load i32, i32* [[BIDX]], align 4
+; CHECK-NEXT:    [[A_P1:%.*]] = add i32 [[B]], 2
+; CHECK-NEXT:    store i32 [[A_P1]], i32* [[AIDX_NEXT]], align 4
+; CHECK-NEXT:    [[A:%.*]] = load i8*, i8** [[AIDX_I8P]], align 8
+; CHECK-NEXT:    [[C:%.*]] = getelementptr i8, i8* [[A]], i32 57
+; CHECK-NEXT:    [[C_I64P:%.*]] = ptrtoint i8* [[C]] to i32
+; CHECK-NEXT:    store i32 [[C_I64P]], i32* [[CIDX]], align 4
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK:       for.end:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+
+  %Aidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
+  %Bidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
+  %Cidx = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
+  %Aidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
+  %Aidx.i8p = bitcast i32* %Aidx to i8**
+
+  %b = load i32, i32* %Bidx, align 4
+  %a_p1 = add i32 %b, 2
+  store i32 %a_p1, i32* %Aidx_next, align 4
+
+  %a = load i8*, i8** %Aidx.i8p, align 8
+  %c = getelementptr i8, i8* %a, i32 57
+  %c.i64p = ptrtoint i8* %c to i32
+  store i32 %c.i64p, i32* %Cidx, align 4
+
+  %exitcond = icmp eq i64 %indvars.iv.next, %N
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret void
+}


        


More information about the llvm-commits mailing list