[llvm] 5b84ee4 - [Alignment] Fix misaligned interleaved loads

Guillaume Chatelet via llvm-commits llvm-commits at lists.llvm.org
Wed May 27 05:16:00 PDT 2020


Author: Guillaume Chatelet
Date: 2020-05-27T12:12:22Z
New Revision: 5b84ee4f61419b9a911ce75b4bc1c5cc7de1d0d6

URL: https://github.com/llvm/llvm-project/commit/5b84ee4f61419b9a911ce75b4bc1c5cc7de1d0d6
DIFF: https://github.com/llvm/llvm-project/commit/5b84ee4f61419b9a911ce75b4bc1c5cc7de1d0d6.diff

LOG: [Alignment] Fix misaligned interleaved loads

Summary: Tentatively fixing https://bugs.llvm.org/show_bug.cgi?id=45957

When X86InterleavedAccessGroup::decompose splits a wide interleaved load into
narrower sub-loads, every sub-load inherited the original load's alignment.
Only the first sub-load may claim that alignment; the subsequent loads now use
the alignment implied by their byte offset, i.e. the commonAlignment of the
original alignment and the sub-vector size in bytes.

Reviewers: craig.topper, nlopes

Subscribers: hiraditya, llvm-commits, RKSimon, jdoerfert, efriedma

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D80276
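
For reference, the alignment given to the trailing sub-loads comes from
commonAlignment() in llvm/Support/Alignment.h, which boils down to the largest
power of two dividing both the original alignment and the size of one chunk;
since every later offset is a multiple of the chunk size, this bounds the
alignment of every sub-load after the first. A minimal standalone sketch of
that computation (plain C++, not the LLVM headers; commonPow2 is an
illustrative stand-in for commonAlignment, and the numbers mirror the updated
test below):

    #include <cassert>
    #include <cstdint>

    // Largest power of two dividing both values; models what
    // commonAlignment(Align, offset) returns in llvm/Support/Alignment.h.
    static uint64_t commonPow2(uint64_t A, uint64_t B) {
      uint64_t Combined = A | B;
      return Combined & (~Combined + 1); // isolate the lowest set bit
    }

    int main() {
      // A <96 x i8> load with `align 128` decomposed into <16 x i8> chunks:
      // the first chunk keeps align 128, but the chunks at offsets 16, 32, ...
      // may only claim align 16 -- the change visible in the updated test.
      assert(commonPow2(/*FirstAlignment=*/128, /*ChunkBytes=*/16) == 16);

      // If the chunk size is a multiple of the original alignment,
      // nothing is lost.
      assert(commonPow2(128, 256) == 128);
      return 0;
    }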

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86InterleavedAccess.cpp
    llvm/test/Transforms/InterleavedAccess/X86/interleavedLoad.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86InterleavedAccess.cpp b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
index de84874f715d..72a37a9ddeb9 100644
--- a/llvm/lib/Target/X86/X86InterleavedAccess.cpp
+++ b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
@@ -150,7 +150,7 @@ bool X86InterleavedAccessGroup::isSupported() const {
   // We support shuffle represents stride 4 for byte type with size of
   // WideInstSize.
   if (ShuffleElemSize == 64 && WideInstSize == 1024 && Factor == 4)
-     return true;
+    return true;
 
   if (ShuffleElemSize == 8 && isa<StoreInst>(Inst) && Factor == 4 &&
       (WideInstSize == 256 || WideInstSize == 512 || WideInstSize == 1024 ||
@@ -211,13 +211,20 @@ void X86InterleavedAccessGroup::decompose(
     VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
   }
   // Generate N loads of T type.
+  assert(VecBaseTy->getPrimitiveSizeInBits().isByteSized() &&
+         "VecBaseTy's size must be a multiple of 8");
+  const Align FirstAlignment = LI->getAlign();
+  const Align SubsequentAlignment = commonAlignment(
+      FirstAlignment, VecBaseTy->getPrimitiveSizeInBits().getFixedSize() / 8);
+  Align Alignment = FirstAlignment;
   for (unsigned i = 0; i < NumLoads; i++) {
     // TODO: Support inbounds GEP.
     Value *NewBasePtr =
         Builder.CreateGEP(VecBaseTy, VecBasePtr, Builder.getInt32(i));
     Instruction *NewLoad =
-        Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlign());
+        Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, Alignment);
     DecomposedVectors.push_back(NewLoad);
+    Alignment = SubsequentAlignment;
   }
 }
 
@@ -255,7 +262,7 @@ static void genShuffleBland(MVT VT, ArrayRef<int> Mask,
                             SmallVectorImpl<int> &Out, int LowOffset,
                             int HighOffset) {
   assert(VT.getSizeInBits() >= 256 &&
-    "This function doesn't accept width smaller then 256");
+         "This function doesn't accept width smaller then 256");
   unsigned NumOfElm = VT.getVectorNumElements();
   for (unsigned i = 0; i < Mask.size(); i++)
     Out.push_back(Mask[i] + LowOffset);
@@ -289,7 +296,7 @@ static void reorderSubVector(MVT VT, SmallVectorImpl<Value *> &TransposedMatrix,
   if (VecElems == 16) {
     for (unsigned i = 0; i < Stride; i++)
       TransposedMatrix[i] = Builder.CreateShuffleVector(
-        Vec[i], UndefValue::get(Vec[i]->getType()), VPShuf);
+          Vec[i], UndefValue::get(Vec[i]->getType()), VPShuf);
     return;
   }
 
@@ -298,20 +305,19 @@ static void reorderSubVector(MVT VT, SmallVectorImpl<Value *> &TransposedMatrix,
 
   for (unsigned i = 0; i < (VecElems / 16) * Stride; i += 2) {
     genShuffleBland(VT, VPShuf, OptimizeShuf, (i / Stride) * 16,
-      (i + 1) / Stride * 16);
+                    (i + 1) / Stride * 16);
     Temp[i / 2] = Builder.CreateShuffleVector(
-      Vec[i % Stride], Vec[(i + 1) % Stride], OptimizeShuf);
+        Vec[i % Stride], Vec[(i + 1) % Stride], OptimizeShuf);
     OptimizeShuf.clear();
   }
 
   if (VecElems == 32) {
     std::copy(Temp, Temp + Stride, TransposedMatrix.begin());
     return;
-  }
-  else
+  } else
     for (unsigned i = 0; i < Stride; i++)
       TransposedMatrix[i] =
-      Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat);
+          Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat);
 }
 
 void X86InterleavedAccessGroup::interleave8bitStride4VF8(
@@ -682,7 +688,7 @@ void X86InterleavedAccessGroup::interleave8bitStride3(
 
   unsigned NumOfElm = VT.getVectorNumElements();
   group2Shuffle(VT, GroupSize, VPShuf);
-  reorderSubVector(VT, TransposedMatrix, Vec, VPShuf, NumOfElm,3, Builder);
+  reorderSubVector(VT, TransposedMatrix, Vec, VPShuf, NumOfElm, 3, Builder);
 }
 
 void X86InterleavedAccessGroup::transpose_4x4(

diff  --git a/llvm/test/Transforms/InterleavedAccess/X86/interleavedLoad.ll b/llvm/test/Transforms/InterleavedAccess/X86/interleavedLoad.ll
index 9705b438fa39..aa682ddcb381 100644
--- a/llvm/test/Transforms/InterleavedAccess/X86/interleavedLoad.ll
+++ b/llvm/test/Transforms/InterleavedAccess/X86/interleavedLoad.ll
@@ -8,15 +8,15 @@ define <32 x i8> @interleaved_load_vf32_i8_stride3(<96 x i8>* %ptr){
 ; AVX2-NEXT:    [[TMP2:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 0
 ; AVX2-NEXT:    [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[TMP2]], align 128
 ; AVX2-NEXT:    [[TMP4:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 1
-; AVX2-NEXT:    [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[TMP4]], align 128
+; AVX2-NEXT:    [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[TMP4]], align 16
 ; AVX2-NEXT:    [[TMP6:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 2
-; AVX2-NEXT:    [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* [[TMP6]], align 128
+; AVX2-NEXT:    [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* [[TMP6]], align 16
 ; AVX2-NEXT:    [[TMP8:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 3
-; AVX2-NEXT:    [[TMP9:%.*]] = load <16 x i8>, <16 x i8>* [[TMP8]], align 128
+; AVX2-NEXT:    [[TMP9:%.*]] = load <16 x i8>, <16 x i8>* [[TMP8]], align 16
 ; AVX2-NEXT:    [[TMP10:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 4
-; AVX2-NEXT:    [[TMP11:%.*]] = load <16 x i8>, <16 x i8>* [[TMP10]], align 128
+; AVX2-NEXT:    [[TMP11:%.*]] = load <16 x i8>, <16 x i8>* [[TMP10]], align 16
 ; AVX2-NEXT:    [[TMP12:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 5
-; AVX2-NEXT:    [[TMP13:%.*]] = load <16 x i8>, <16 x i8>* [[TMP12]], align 128
+; AVX2-NEXT:    [[TMP13:%.*]] = load <16 x i8>, <16 x i8>* [[TMP12]], align 16
 ; AVX2-NEXT:    [[TMP14:%.*]] = shufflevector <16 x i8> [[TMP3]], <16 x i8> [[TMP9]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 ; AVX2-NEXT:    [[TMP15:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> [[TMP11]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
 ; AVX2-NEXT:    [[TMP16:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP13]], <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
@@ -50,9 +50,9 @@ define <16 x i8> @interleaved_load_vf16_i8_stride3(<48 x i8>* %ptr){
 ; AVX2-NEXT:    [[TMP2:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 0
 ; AVX2-NEXT:    [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[TMP2]], align 64
 ; AVX2-NEXT:    [[TMP4:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 1
-; AVX2-NEXT:    [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[TMP4]], align 64
+; AVX2-NEXT:    [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[TMP4]], align 16
 ; AVX2-NEXT:    [[TMP6:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 2
-; AVX2-NEXT:    [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* [[TMP6]], align 64
+; AVX2-NEXT:    [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* [[TMP6]], align 16
 ; AVX2-NEXT:    [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP3]], <16 x i8> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 2, i32 5, i32 8, i32 11, i32 14, i32 1, i32 4, i32 7, i32 10, i32 13>
 ; AVX2-NEXT:    [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 2, i32 5, i32 8, i32 11, i32 14, i32 1, i32 4, i32 7, i32 10, i32 13>
 ; AVX2-NEXT:    [[TMP10:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 2, i32 5, i32 8, i32 11, i32 14, i32 1, i32 4, i32 7, i32 10, i32 13>

More information about the llvm-commits mailing list