[llvm] 11446b0 - [VectorCombine] Fix for non-zero addrspace when creating vector load from scalar load
Bjorn Pettersson via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 13 09:26:46 PDT 2020
Author: Bjorn Pettersson
Date: 2020-08-13T18:25:32+02:00
New Revision: 11446b02c7ec258a55de0259c3447d9ce5d5ac63
URL: https://github.com/llvm/llvm-project/commit/11446b02c7ec258a55de0259c3447d9ce5d5ac63
DIFF: https://github.com/llvm/llvm-project/commit/11446b02c7ec258a55de0259c3447d9ce5d5ac63.diff
LOG: [VectorCombine] Fix for non-zero addrspace when creating vector load from scalar load
This is a fixup to commit 43bdac290663f4424f9fb, making sure that the
address space of the original load pointer is retained in the
vector pointer.
Resolves an assertion failure,
Assertion `castIsValid(op, S, Ty) && "Invalid cast!"' failed.
caused by an address space mismatch.
Reviewed By: spatel
Differential Revision: https://reviews.llvm.org/D85912
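For illustration only (not part of the commit message), here is a minimal IR
sketch modeled on the new test case below; the function name is made up. A
bitcast may not change the address space of a pointer, so the vector pointer
has to be created in the same address space as the original scalar pointer:

define <4 x float> @load_f32_insert_v4f32_as44(<4 x float> addrspace(44)* align 16 dereferenceable(16) %p) {
  %gep = getelementptr inbounds <4 x float>, <4 x float> addrspace(44)* %p, i64 0, i64 0
  ; Before the fix the transform built the equivalent of
  ;   bitcast float addrspace(44)* %gep to <4 x float>*
  ; which changes the address space and hits the castIsValid assertion.
  ; With VectorTy->getPointerTo(AS) the cast keeps addrspace(44):
  %vp = bitcast float addrspace(44)* %gep to <4 x float> addrspace(44)*
  %r = load <4 x float>, <4 x float> addrspace(44)* %vp, align 16
  ret <4 x float> %r
}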
Added:
Modified:
llvm/lib/Transforms/Vectorize/VectorCombine.cpp
llvm/test/Transforms/VectorCombine/X86/load.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 03fdda093584..67725e30b834 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -121,15 +121,15 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
if (!isSafeToLoadUnconditionally(PtrOp, VectorTy, Alignment, DL, Load, &DT))
return false;
+ unsigned AS = Load->getPointerAddressSpace();
+
// Original pattern: insertelt undef, load [free casts of] ScalarPtr, 0
- int OldCost = TTI.getMemoryOpCost(Instruction::Load, ScalarTy, Alignment,
- Load->getPointerAddressSpace());
+ int OldCost = TTI.getMemoryOpCost(Instruction::Load, ScalarTy, Alignment, AS);
APInt DemandedElts = APInt::getOneBitSet(VecNumElts, 0);
OldCost += TTI.getScalarizationOverhead(VectorTy, DemandedElts, true, false);
// New pattern: load VecPtr
- int NewCost = TTI.getMemoryOpCost(Instruction::Load, VectorTy, Alignment,
- Load->getPointerAddressSpace());
+ int NewCost = TTI.getMemoryOpCost(Instruction::Load, VectorTy, Alignment, AS);
// We can aggressively convert to the vector form because the backend can
// invert this transform if it does not result in a performance win.
@@ -139,7 +139,7 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
// It is safe and potentially profitable to load a vector directly:
// inselt undef, load Scalar, 0 --> load VecPtr
IRBuilder<> Builder(Load);
- Value *CastedPtr = Builder.CreateBitCast(PtrOp, VectorTy->getPointerTo());
+ Value *CastedPtr = Builder.CreateBitCast(PtrOp, VectorTy->getPointerTo(AS));
LoadInst *VecLd = Builder.CreateAlignedLoad(VectorTy, CastedPtr, Alignment);
replaceValue(I, *VecLd);
++NumVecLoad;
diff --git a/llvm/test/Transforms/VectorCombine/X86/load.ll b/llvm/test/Transforms/VectorCombine/X86/load.ll
index edd1e4af099e..524f48332b7c 100644
--- a/llvm/test/Transforms/VectorCombine/X86/load.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/load.ll
@@ -234,6 +234,19 @@ define <4 x float> @gep00_load_f32_insert_v4f32(<4 x float>* align 16 dereferenc
ret <4 x float> %r
}
+; Should work with addrspace as well.
+
+define <4 x float> @gep00_load_f32_insert_v4f32_addrspace(<4 x float> addrspace(44)* align 16 dereferenceable(16) %p) {
+; CHECK-LABEL: @gep00_load_f32_insert_v4f32_addrspace(
+; CHECK-NEXT: [[R:%.*]] = load <4 x float>, <4 x float> addrspace(44)* [[P:%.*]], align 16
+; CHECK-NEXT: ret <4 x float> [[R]]
+;
+ %gep = getelementptr inbounds <4 x float>, <4 x float> addrspace(44)* %p, i64 0, i64 0
+ %s = load float, float addrspace(44)* %gep, align 16
+ %r = insertelement <4 x float> undef, float %s, i64 0
+ ret <4 x float> %r
+}
+
; If there are enough dereferenceable bytes, we can offset the vector load.
define <8 x i16> @gep01_load_i16_insert_v8i16(<8 x i16>* align 16 dereferenceable(18) %p) {