[llvm] r288410 - Refactored X86InterleavedAccess into a class. NFCI.

David L Kreitzer via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 1 11:56:39 PST 2016


Author: dlkreitz
Date: Thu Dec  1 13:56:39 2016
New Revision: 288410

URL: http://llvm.org/viewvc/llvm-project?rev=288410&view=rev
Log:
Refactored X86InterleavedAccess into a class. NFCI.

Patch by Farhana Aleen

Differential Revision: https://reviews.llvm.org/D25986
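
For context, the single case isSupported() accepts after this refactoring is a
stride-4 interleaved load of 64-bit elements in 256-bit shuffle vectors on AVX.
As an illustrative sketch (hypothetical names; %ptr and %v0..%v3 are not from
the patch), the IR pattern handed to lowerInterleavedLoad() looks like:

  ; One wide load feeding four strided shuffles, each extracting every
  ; 4th element starting at indices 0..3 (Factor = 4).
  %wide.vec = load <16 x i64>, <16 x i64>* %ptr
  %v0 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef,
                      <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %v1 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef,
                      <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %v2 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef,
                      <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %v3 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef,
                      <4 x i32> <i32 3, i32 7, i32 11, i32 15>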

Modified:
    llvm/trunk/lib/Target/X86/X86InterleavedAccess.cpp

Modified: llvm/trunk/lib/Target/X86/X86InterleavedAccess.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InterleavedAccess.cpp?rev=288410&r1=288409&r2=288410&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InterleavedAccess.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InterleavedAccess.cpp Thu Dec  1 13:56:39 2016
@@ -1,117 +1,221 @@
-//===------- X86InterleavedAccess.cpp --------------===//
+//===--------- X86InterleavedAccess.cpp ----------------------------------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
-//===----------------------------------------------------------------------===//
-//
-// This file contains the X86 implementation of the interleaved accesses
-// optimization generating X86-specific instructions/intrinsics for interleaved
-// access groups.
-//
-//===----------------------------------------------------------------------===//
+//===--------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the X86 implementation of the interleaved accesses
+/// optimization generating X86-specific instructions/intrinsics for
+/// interleaved access groups.
+///
+//===--------------------------------------------------------------------===//
 
 #include "X86ISelLowering.h"
 #include "X86TargetMachine.h"
 
 using namespace llvm;
 
-/// Returns true if the interleaved access group represented by the shuffles
-/// is supported for the subtarget. Returns false otherwise.
-static bool isSupported(const X86Subtarget &SubTarget,
-                        const LoadInst *LI,
-                        const ArrayRef<ShuffleVectorInst *> &Shuffles,
-                        unsigned Factor) {
+/// \brief This class holds the necessary information to represent an
+/// interleaved access group and provides utilities to lower the group into
+/// X86-specific instructions/intrinsics.
+///  E.g., a group of interleaved access loads (Factor = 2; accessing every
+///       other element)
+///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
+///        %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <0, 2, 4, 6>
+///        %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> undef, <1, 3, 5, 7>
+class X86InterleavedAccessGroup {
+  /// \brief Reference to the wide-load instruction of an interleaved access
+  /// group.
+  Instruction *const Inst;
+
+  /// \brief Reference to the shuffles, i.e. the consumers of the wide load
+  /// 'Inst'.
+  ArrayRef<ShuffleVectorInst *> Shuffles;
+
+  /// \brief The starting indices of the user-shuffles.
+  ArrayRef<unsigned> Indices;
+
+  /// \brief The interleaving stride (factor) in terms of elements.
+  const unsigned Factor;
+
+  /// \brief Reference to the underlying target.
+  const X86Subtarget &Subtarget;
+
+  const DataLayout &DL;
+
+  IRBuilder<> &Builder;
+
+  /// \brief Breaks down a vector \p VecInst of N elements into
+  /// \p NumSubVectors sub-vectors of type \p SubVecTy. Returns true and
+  /// leaves the sub-vectors in \p DecomposedVectors if it decomposes
+  /// \p VecInst; returns false otherwise.
+  bool decompose(Instruction *VecInst, unsigned NumSubVectors,
+                 VectorType *SubVecTy,
+                 SmallVectorImpl<Instruction *> &DecomposedVectors);
+
+  /// \brief Performs matrix transposition on a 4x4 matrix \p InputVectors and
+  /// returns the transposed vectors in \p TransposedVectors.
+  /// E.g.
+  /// InputVectors:
+  ///   In-V0 = p1, p2, p3, p4
+  ///   In-V1 = q1, q2, q3, q4
+  ///   In-V2 = r1, r2, r3, r4
+  ///   In-V3 = s1, s2, s3, s4
+  /// OutputVectors:
+  ///   Out-V0 = p1, q1, r1, s1
+  ///   Out-V1 = p2, q2, r2, s2
+  ///   Out-V2 = p3, q3, r3, s3
+  ///   Out-V3 = p4, q4, r4, s4
+  void transpose_4x4(ArrayRef<Instruction *> InputVectors,
+                     SmallVectorImpl<Value *> &TransposedVectors);
+
+public:
+  /// In order to form an interleaved access group, X86InterleavedAccessGroup
+  /// requires a wide-load instruction \p I, a group of interleaved-vectors
+  /// \p Shuffs, a reference to the first indices of each interleaved-vector
+  /// \p Ind, and the interleaving stride factor \p F. In order to generate
+  /// X86-specific instructions/intrinsics, it also requires the underlying
+  /// target information \p STarget.
+  explicit X86InterleavedAccessGroup(Instruction *I,
+                                     ArrayRef<ShuffleVectorInst *> Shuffs,
+                                     ArrayRef<unsigned> Ind,
+                                     const unsigned F,
+                                     const X86Subtarget &STarget,
+                                     IRBuilder<> &B)
+      : Inst(I), Shuffles(Shuffs), Indices(Ind), Factor(F), Subtarget(STarget),
+        DL(Inst->getModule()->getDataLayout()), Builder(B) {}
+
+  /// \brief Returns true if this interleaved access group can be lowered into
+  /// X86-specific instructions/intrinsics, false otherwise.
+  bool isSupported() const;
+
+  /// \brief Lowers this interleaved access group into X86-specific
+  /// instructions/intrinsics.
+  bool lowerIntoOptimizedSequence();
+};
 
-  const DataLayout &DL = Shuffles[0]->getModule()->getDataLayout();
+bool X86InterleavedAccessGroup::isSupported() const {
   VectorType *ShuffleVecTy = Shuffles[0]->getType();
-  unsigned ShuffleVecSize = DL.getTypeSizeInBits(ShuffleVecTy);
+  uint64_t ShuffleVecSize = DL.getTypeSizeInBits(ShuffleVecTy);
   Type *ShuffleEltTy = ShuffleVecTy->getVectorElementType();
 
-  if (DL.getTypeSizeInBits(LI->getType()) < Factor * ShuffleVecSize)
+  if (DL.getTypeSizeInBits(Inst->getType()) < Factor * ShuffleVecSize)
     return false;
 
   // Currently, lowering is supported only for 64-bit elements in 256-bit
   // vectors with Factor == 4, on AVX.
-  if (!SubTarget.hasAVX() || ShuffleVecSize != 256 ||
-      DL.getTypeSizeInBits(ShuffleEltTy) != 64 ||
-      Factor != 4)
+  if (!Subtarget.hasAVX() || ShuffleVecSize != 256 ||
+      DL.getTypeSizeInBits(ShuffleEltTy) != 64 || Factor != 4)
     return false;
 
   return true;
 }
 
-/// \brief Lower interleaved load(s) into target specific instructions/
-/// intrinsics. Lowering sequence varies depending on the vector-types, factor,
-/// number of shuffles and ISA.
-/// Currently, lowering is supported for 4x64 bits with Factor = 4 on AVX.
-bool X86TargetLowering::lowerInterleavedLoad(
-    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
-    ArrayRef<unsigned> Indices, unsigned Factor) const {
-  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
-         "Invalid interleave factor");
-  assert(!Shuffles.empty() && "Empty shufflevector input");
-  assert(Shuffles.size() == Indices.size() &&
-         "Unmatched number of shufflevectors and indices");
+bool X86InterleavedAccessGroup::decompose(
+    Instruction *VecInst, unsigned NumSubVectors, VectorType *SubVecTy,
+    SmallVectorImpl<Instruction *> &DecomposedVectors) {
+  Type *VecTy = VecInst->getType();
+
+  assert(VecTy->isVectorTy() &&
+         DL.getTypeSizeInBits(VecTy) >=
+             DL.getTypeSizeInBits(SubVecTy) * NumSubVectors &&
+         "Invalid Inst size!");
+  assert(VecTy->getVectorElementType() == SubVecTy->getVectorElementType() &&
+         "Element type mismatch!");
 
-  if (!isSupported(Subtarget, LI, Shuffles, Factor))
+  if (!isa<LoadInst>(VecInst))
     return false;
 
-  VectorType *ShuffleVecTy = Shuffles[0]->getType();
-
-  Type *VecBasePtrTy = ShuffleVecTy->getPointerTo(LI->getPointerAddressSpace());
-
-  IRBuilder<> Builder(LI);
-  SmallVector<Instruction *, 4> NewLoads;
-  SmallVector<Value *, 4> NewShuffles;
-  NewShuffles.resize(Factor);
+  LoadInst *LI = cast<LoadInst>(VecInst);
+  Type *VecBasePtrTy = SubVecTy->getPointerTo(LI->getPointerAddressSpace());
 
   Value *VecBasePtr =
       Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
 
-  // Generate 4 loads of type v4xT64
-  for (unsigned Part = 0; Part < Factor; Part++) {
+  // Generate NumSubVectors loads of type SubVecTy.
+  for (unsigned i = 0; i < NumSubVectors; i++) {
     // TODO: Support inbounds GEP
-    Value *NewBasePtr =
-        Builder.CreateGEP(VecBasePtr, Builder.getInt32(Part));
+    Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
     Instruction *NewLoad =
         Builder.CreateAlignedLoad(NewBasePtr, LI->getAlignment());
-    NewLoads.push_back(NewLoad);
+    DecomposedVectors.push_back(NewLoad);
   }
 
+  return true;
+}
+
+void X86InterleavedAccessGroup::transpose_4x4(
+    ArrayRef<Instruction *> Matrix,
+    SmallVectorImpl<Value *> &TransposedMatrix) {
+  assert(Matrix.size() == 4 && "Invalid matrix size");
+  TransposedMatrix.resize(4);
+
   // dst = src1[0,1],src2[0,1]
   uint32_t IntMask1[] = {0, 1, 4, 5};
-  ArrayRef<unsigned int> ShuffleMask = makeArrayRef(IntMask1, 4);
-  Value *IntrVec1 =
-      Builder.CreateShuffleVector(NewLoads[0], NewLoads[2], ShuffleMask);
-  Value *IntrVec2 =
-      Builder.CreateShuffleVector(NewLoads[1], NewLoads[3], ShuffleMask);
+  ArrayRef<uint32_t> Mask = makeArrayRef(IntMask1, 4);
+  Value *IntrVec1 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
+  Value *IntrVec2 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
 
   // dst = src1[2,3],src2[2,3]
   uint32_t IntMask2[] = {2, 3, 6, 7};
-  ShuffleMask = makeArrayRef(IntMask2, 4);
-  Value *IntrVec3 =
-      Builder.CreateShuffleVector(NewLoads[0], NewLoads[2], ShuffleMask);
-  Value *IntrVec4 =
-      Builder.CreateShuffleVector(NewLoads[1], NewLoads[3], ShuffleMask);
+  Mask = makeArrayRef(IntMask2, 4);
+  Value *IntrVec3 = Builder.CreateShuffleVector(Matrix[0], Matrix[2], Mask);
+  Value *IntrVec4 = Builder.CreateShuffleVector(Matrix[1], Matrix[3], Mask);
 
   // dst = src1[0],src2[0],src1[2],src2[2]
   uint32_t IntMask3[] = {0, 4, 2, 6};
-  ShuffleMask = makeArrayRef(IntMask3, 4);
-  NewShuffles[0] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, ShuffleMask);
-  NewShuffles[2] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, ShuffleMask);
+  Mask = makeArrayRef(IntMask3, 4);
+  TransposedMatrix[0] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
+  TransposedMatrix[2] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
 
   // dst = src1[1],src2[1],src1[3],src2[3]
   uint32_t IntMask4[] = {1, 5, 3, 7};
-  ShuffleMask = makeArrayRef(IntMask4, 4);
-  NewShuffles[1] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, ShuffleMask);
-  NewShuffles[3] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, ShuffleMask);
-
-  for (unsigned i = 0; i < Shuffles.size(); i++) {
-    unsigned Index = Indices[i];
-    Shuffles[i]->replaceAllUsesWith(NewShuffles[Index]);
-  }
+  Mask = makeArrayRef(IntMask4, 4);
+  TransposedMatrix[1] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, Mask);
+  TransposedMatrix[3] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, Mask);
+}
+
+// Lowers this interleaved access group into X86-specific
+// instructions/intrinsics.
+bool X86InterleavedAccessGroup::lowerIntoOptimizedSequence() {
+  SmallVector<Instruction *, 4> DecomposedVectors;
+  VectorType *VecTy = Shuffles[0]->getType();
+  // Try to split the wide load into target-register-sized sub-vectors.
+  if (!decompose(Inst, Factor, VecTy, DecomposedVectors))
+    return false;
+
+  SmallVector<Value *, 4> TransposedVectors;
+  // Perform a matrix transposition to compute the interleaved results; the
+  // shuffles generated here map onto efficient target-specific instructions.
+  transpose_4x4(DecomposedVectors, TransposedVectors);
+
+  // Now replace the unoptimized interleaved vectors with the transposed
+  // interleaved vectors.
+  for (unsigned i = 0; i < Shuffles.size(); i++)
+    Shuffles[i]->replaceAllUsesWith(TransposedVectors[Indices[i]]);
 
   return true;
 }
+
+// Lower interleaved load(s) into target-specific instructions/intrinsics.
+// The lowering sequence varies depending on the vector types, factor,
+// number of shuffles, and ISA.
+// Currently, lowering is supported for 4x64-bit vectors with Factor = 4 on
+// AVX.
+bool X86TargetLowering::lowerInterleavedLoad(
+    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
+    ArrayRef<unsigned> Indices, unsigned Factor) const {
+  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
+         "Invalid interleave factor");
+  assert(!Shuffles.empty() && "Empty shufflevector input");
+  assert(Shuffles.size() == Indices.size() &&
+         "Unmatched number of shufflevectors and indices");
+
+  // Create an interleaved access group.
+  IRBuilder<> Builder(LI);
+  X86InterleavedAccessGroup Grp(LI, Shuffles, Indices, Factor, Subtarget,
+                                Builder);
+
+  return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
+}
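
To make the lowering concrete: for the <16 x i64> example above, decompose()
replaces the wide load with four consecutive <4 x i64> loads, which form the
rows of a 4x4 matrix of 64-bit elements (a sketch with hypothetical value
names, not output copied from the compiler):

  %base = bitcast <16 x i64>* %ptr to <4 x i64>*
  %gep0 = getelementptr <4 x i64>, <4 x i64>* %base, i32 0
  %ld0  = load <4 x i64>, <4 x i64>* %gep0   ; row 0: e0  e1  e2  e3
  %gep1 = getelementptr <4 x i64>, <4 x i64>* %base, i32 1
  %ld1  = load <4 x i64>, <4 x i64>* %gep1   ; row 1: e4  e5  e6  e7
  %gep2 = getelementptr <4 x i64>, <4 x i64>* %base, i32 2
  %ld2  = load <4 x i64>, <4 x i64>* %gep2   ; row 2: e8  e9  e10 e11
  %gep3 = getelementptr <4 x i64>, <4 x i64>* %base, i32 3
  %ld3  = load <4 x i64>, <4 x i64>* %gep3   ; row 3: e12 e13 e14 e15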
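
transpose_4x4() then recombines the rows with the two shuffle stages shown in
the patch; tracking elements through the masks shows that each result is one
column of the matrix, i.e. exactly one of the original strided shuffles (again
a hypothetical-name sketch):

  ; Stage 1: masks {0,1,4,5} and {2,3,6,7} pair rows (0,2) and (1,3).
  %i1 = shufflevector <4 x i64> %ld0, <4 x i64> %ld2,
                      <4 x i32> <i32 0, i32 1, i32 4, i32 5>  ; e0  e1  e8  e9
  %i2 = shufflevector <4 x i64> %ld1, <4 x i64> %ld3,
                      <4 x i32> <i32 0, i32 1, i32 4, i32 5>  ; e4  e5  e12 e13
  %i3 = shufflevector <4 x i64> %ld0, <4 x i64> %ld2,
                      <4 x i32> <i32 2, i32 3, i32 6, i32 7>  ; e2  e3  e10 e11
  %i4 = shufflevector <4 x i64> %ld1, <4 x i64> %ld3,
                      <4 x i32> <i32 2, i32 3, i32 6, i32 7>  ; e6  e7  e14 e15

  ; Stage 2: masks {0,4,2,6} and {1,5,3,7} interleave the pairs.
  %t0 = shufflevector <4 x i64> %i1, <4 x i64> %i2,
                      <4 x i32> <i32 0, i32 4, i32 2, i32 6>  ; e0 e4 e8  e12, replaces %v0
  %t1 = shufflevector <4 x i64> %i1, <4 x i64> %i2,
                      <4 x i32> <i32 1, i32 5, i32 3, i32 7>  ; e1 e5 e9  e13, replaces %v1
  %t2 = shufflevector <4 x i64> %i3, <4 x i64> %i4,
                      <4 x i32> <i32 0, i32 4, i32 2, i32 6>  ; e2 e6 e10 e14, replaces %v2
  %t3 = shufflevector <4 x i64> %i3, <4 x i64> %i4,
                      <4 x i32> <i32 1, i32 5, i32 3, i32 7>  ; e3 e7 e11 e15, replaces %v3

On AVX each <4 x i64> value occupies a single 256-bit register, so the group
becomes four plain vector loads plus eight register-to-register shuffles rather
than four strided extractions from a 1024-bit value.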