[llvm] r345998 - [SystemZ] Rework getInterleavedMemoryOpCost()

Jonas Paulsson via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 2 10:15:36 PDT 2018


Author: jonpa
Date: Fri Nov  2 10:15:36 2018
New Revision: 345998

URL: http://llvm.org/viewvc/llvm-project?rev=345998&view=rev
Log:
[SystemZ] Rework getInterleavedMemoryOpCost()

Model this function more closely after the BasicTTIImpl version, with
separate handling of loads and stores. For loads, the set of actually loaded
vectors is checked.

This makes the code more readable and, in general, slightly more accurate.

Review: Ulrich Weigand
https://reviews.llvm.org/D53071
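
For illustration, below is a minimal standalone C++ sketch (not part of the
patch; the group shape is chosen to match the fun0 case in the added test,
and std::set stands in for BitVector for brevity). It mirrors the new
load-path counting for Factor = 4, VF = 4, i16 elements with all four
indices used, and prints "cost = 6": 2 vector loads plus 4 vperms.

  #include <algorithm>
  #include <cstdio>
  #include <set>

  int main() {
    unsigned Factor = 4, VF = 4, ScalarBits = 16;
    unsigned Indices[] = {0, 1, 2, 3};
    unsigned NumEltsPerVecReg = 128 / ScalarBits;         // 8 elements/register
    unsigned NumDstVecs = (VF * ScalarBits + 127) / 128;  // 1 result register

    std::set<unsigned> UsedVecs;   // vector registers actually loaded
    unsigned NumPermutes = 0;
    for (unsigned Index : Indices) {
      std::set<unsigned> SrcVecs;  // registers holding elements of this Index
      for (unsigned Elt = 0; Elt < VF; ++Elt) {
        unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
        UsedVecs.insert(Vec);
        SrcVecs.insert(Vec);
      }
      // vperm merges two inputs at once, so each result register gets one
      // source register "for free".
      NumPermutes += std::max(1u, unsigned(SrcVecs.size()) - NumDstVecs);
    }
    std::printf("cost = %u\n", unsigned(UsedVecs.size()) + NumPermutes);
    return 0;
  }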

Added:
    llvm/trunk/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-02.ll
Modified:
    llvm/trunk/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp

Modified: llvm/trunk/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp?rev=345998&r1=345997&r2=345998&view=diff
==============================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp Fri Nov  2 10:15:36 2018
@@ -979,6 +979,11 @@ int SystemZTTIImpl::getMemoryOpCost(unsi
   return  NumOps;
 }
 
+// The generic implementation of getInterleavedMemoryOpCost() is based on
+// adding costs of the memory operations plus all the extracts and inserts
+// needed for using / defining the vector operands. The SystemZ version does
+// roughly the same but bases the computations on vector permutations
+// instead.
 int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                                unsigned Factor,
                                                ArrayRef<unsigned> Indices,
@@ -993,22 +998,49 @@ int SystemZTTIImpl::getInterleavedMemory
   assert(isa<VectorType>(VecTy) &&
          "Expect a vector type for interleaved memory op");
 
-  int NumWideParts = getNumVectorRegs(VecTy);
+  // Return the ceiling of dividing A by B.
+  auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; };
 
-  // How many source vectors are handled to produce a vectorized operand?
-  int NumElsPerVector = (VecTy->getVectorNumElements() / NumWideParts);
-  int NumSrcParts =
-    ((NumWideParts > NumElsPerVector) ? NumElsPerVector : NumWideParts);
-
-  // A Load group may have gaps.
-  unsigned NumOperands =
-    ((Opcode == Instruction::Load) ? Indices.size() : Factor);
-
-  // Each needed permute takes two vectors as input.
-  if (NumSrcParts > 1)
-    NumSrcParts--;
-  int NumPermutes = NumSrcParts * NumOperands;
+  unsigned NumElts = VecTy->getVectorNumElements();
+  assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
+  unsigned VF = NumElts / Factor;
+  unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy));
+  unsigned NumVectorMemOps = getNumVectorRegs(VecTy);
+  unsigned NumPermutes = 0;
+
+  if (Opcode == Instruction::Load) {
+    // Loading interleave groups may have gaps, which may mean fewer
+    // loads. Find out how many vectors are loaded in total, and how many
+    // of them each value (index) appears in.
+    BitVector UsedInsts(NumVectorMemOps, false);
+    std::vector<BitVector> ValueVecs(Factor, BitVector(NumVectorMemOps, false));
+    for (unsigned Index : Indices)
+      for (unsigned Elt = 0; Elt < VF; ++Elt) {
+        unsigned Vec = (Index + Elt * Factor) / NumEltsPerVecReg;
+        UsedInsts.set(Vec);
+        ValueVecs[Index].set(Vec);
+      }
+    NumVectorMemOps = UsedInsts.count();
+
+    for (unsigned Index : Indices) {
+      // Estimate that each loaded source vector containing this Index
+      // requires one operation, except that vperm can handle two input
+      // registers the first time for each destination vector.
+      unsigned NumSrcVecs = ValueVecs[Index].count();
+      unsigned NumDstVecs = ceil(VF * getScalarSizeInBits(VecTy), 128U);
+      assert (NumSrcVecs >= NumDstVecs && "Expected at least as many sources");
+      NumPermutes += std::max(1U, NumSrcVecs - NumDstVecs);
+    }
+  } else {
+    // Estimate the permutes for each stored vector as the smaller of the
+    // number of elements per vector register and the interleave factor.
+    // Subtract one per destination vector for vperm (see above).
+    unsigned NumSrcVecs = std::min(NumEltsPerVecReg, Factor);
+    unsigned NumDstVecs = NumVectorMemOps;
+    assert (NumSrcVecs > 1 && "Expected at least two source vectors.");
+    NumPermutes += (NumDstVecs * NumSrcVecs) - NumDstVecs;
+  }
 
   // Cost of load/store operations and the permutations needed.
-  return NumWideParts + NumPermutes;
+  return NumVectorMemOps + NumPermutes;
 }
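
As a rough worked example of the store path above (not taken from the patch
or its tests; the numbers are purely illustrative): storing an interleave
group with Factor = 4 and VF = 4 i32 elements gives NumElts = 16,
NumEltsPerVecReg = 128 / 32 = 4 and NumVectorMemOps = 4, so
NumSrcVecs = min(4, 4) = 4, NumDstVecs = 4 and NumPermutes = 4 * 4 - 4 = 12,
for a total returned cost of 4 + 12 = 16.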

Added: llvm/trunk/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-02.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-02.ll?rev=345998&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-02.ll (added)
+++ llvm/trunk/test/Transforms/LoopVectorize/SystemZ/mem-interleaving-costs-02.ll Fri Nov  2 10:15:36 2018
@@ -0,0 +1,149 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
+; RUN:   -debug-only=loop-vectorize,vectorutils -max-interleave-group-factor=64\
+; RUN:   -disable-output < %s 2>&1 | FileCheck %s
+;
+; Check that some cost estimations for interleave groups make sense.
+
+; This loop loads four i16 values at indices [0, 1, 2, 3], with a stride of
+; 4. At VF=4, memory interleaving means loading 4 * 4 * 16 = 256 bits, i.e.
+; two vector registers. Each of the 4 vector values must then be constructed
+; from the two vector registers with one vperm each: a cost of 2 + 4 = 6.
+;
+; CHECK: LV: Checking a loop in "fun0"
+; CHECK: LV: Found an estimated cost of 6 for VF 4 For instruction:   %ld0 = load i16
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction:   %ld1 = load i16
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction:   %ld2 = load i16
+; CHECK: LV: Found an estimated cost of 0 for VF 4 For instruction:   %ld3 = load i16
+define void @fun0(i16 *%ptr, i16 *%dst) {
+entry:
+  br label %for.body
+
+for.body:
+  %ivptr = phi i16* [ %ptr.next, %for.body ], [ %ptr, %entry ]
+  %iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %inc = add i64 %iv, 4
+  %ptr0 = getelementptr inbounds i16, i16* %ivptr, i64 0
+  %ld0 = load i16, i16* %ptr0
+  %ptr1 = getelementptr inbounds i16, i16* %ivptr, i64 1
+  %ld1 = load i16, i16* %ptr1
+  %ptr2 = getelementptr inbounds i16, i16* %ivptr, i64 2
+  %ld2 = load i16, i16* %ptr2
+  %ptr3 = getelementptr inbounds i16, i16* %ivptr, i64 3
+  %ld3 = load i16, i16* %ptr3
+  %a1 = add i16 %ld0, %ld1
+  %a2 = add i16 %a1, %ld2
+  %a3 = add i16 %a2, %ld3
+  %dstptr = getelementptr inbounds i16, i16* %dst, i64 %iv
+  store i16 %a3, i16* %dstptr
+  %ptr.next = getelementptr inbounds i16, i16* %ivptr, i64 4
+  %cmp = icmp eq i64 %inc, 100
+  br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
+
+; This loop loads one i8 value with a stride of 3. At VF=16, this means loading
+; 3 vector registers, and then constructing the vector value with two vperms,
+; which gives a cost of 5.
+;
+; CHECK: LV: Checking a loop in "fun1"
+; CHECK: LV: Found an estimated cost of 5 for VF 16 For instruction:   %ld0 = load i8
+define void @fun1(i8 *%ptr, i8 *%dst) {
+entry:
+  br label %for.body
+
+for.body:
+  %ivptr = phi i8* [ %ptr.next, %for.body ], [ %ptr, %entry ]
+  %iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %inc = add i64 %iv, 4
+  %ptr0 = getelementptr inbounds i8, i8* %ivptr, i64 0
+  %ld0 = load i8, i8* %ptr0
+  %dstptr = getelementptr inbounds i8, i8* %dst, i64 %iv
+  store i8 %ld0, i8* %dstptr
+  %ptr.next = getelementptr inbounds i8, i8* %ivptr, i64 3
+  %cmp = icmp eq i64 %inc, 100
+  br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
+
+; This loop is loading 4 i8 values at indices [0, 1, 2, 3], with a stride of
+; 32. At VF=2, this means loading 2 vector registers, and using 4 vperms to
+; produce the vector values, which gives a cost of 6.
+;
+; CHECK: LV: Checking a loop in "fun2"
+; CHECK: LV: Found an estimated cost of 6 for VF 2 For instruction:   %ld0 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction:   %ld1 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction:   %ld2 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction:   %ld3 = load i8
+define void @fun2(i8 *%ptr, i8 *%dst) {
+entry:
+  br label %for.body
+
+for.body:
+  %ivptr = phi i8* [ %ptr.next, %for.body ], [ %ptr, %entry ]
+  %iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %inc = add i64 %iv, 4
+  %ptr0 = getelementptr inbounds i8, i8* %ivptr, i64 0
+  %ld0 = load i8, i8* %ptr0
+  %ptr1 = getelementptr inbounds i8, i8* %ivptr, i64 1
+  %ld1 = load i8, i8* %ptr1
+  %ptr2 = getelementptr inbounds i8, i8* %ivptr, i64 2
+  %ld2 = load i8, i8* %ptr2
+  %ptr3 = getelementptr inbounds i8, i8* %ivptr, i64 3
+  %ld3 = load i8, i8* %ptr3
+  %a1 = add i8 %ld0, %ld1
+  %a2 = add i8 %a1, %ld2
+  %a3 = add i8 %a2, %ld3
+  %dstptr = getelementptr inbounds i8, i8* %dst, i64 %iv
+  store i8 %a3, i8* %dstptr
+  %ptr.next = getelementptr inbounds i8, i8* %ivptr, i64 32
+  %cmp = icmp eq i64 %inc, 100
+  br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
+
+; This loop is loading 4 i8 values at indices [0, 1, 2, 3], with a stride of
+; 30. At VF=2, this means loading 3 vector registers, and using 4 vperms to
+; produce the vector values, which gives a cost of 7. This is the same loop
+; as in fun2, except that the stride makes the second iteration's values
+; straddle a vector register boundary.
+;
+; CHECK: LV: Checking a loop in "fun3"
+; CHECK: LV: Found an estimated cost of 7 for VF 2 For instruction:   %ld0 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction:   %ld1 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction:   %ld2 = load i8
+; CHECK: LV: Found an estimated cost of 0 for VF 2 For instruction:   %ld3 = load i8
+define void @fun3(i8 *%ptr, i8 *%dst) {
+entry:
+  br label %for.body
+
+for.body:
+  %ivptr = phi i8* [ %ptr.next, %for.body ], [ %ptr, %entry ]
+  %iv = phi i64 [ %inc, %for.body ], [ 0, %entry ]
+  %inc = add i64 %iv, 4
+  %ptr0 = getelementptr inbounds i8, i8* %ivptr, i64 0
+  %ld0 = load i8, i8* %ptr0
+  %ptr1 = getelementptr inbounds i8, i8* %ivptr, i64 1
+  %ld1 = load i8, i8* %ptr1
+  %ptr2 = getelementptr inbounds i8, i8* %ivptr, i64 2
+  %ld2 = load i8, i8* %ptr2
+  %ptr3 = getelementptr inbounds i8, i8* %ivptr, i64 3
+  %ld3 = load i8, i8* %ptr3
+  %a1 = add i8 %ld0, %ld1
+  %a2 = add i8 %a1, %ld2
+  %a3 = add i8 %a2, %ld3
+  %dstptr = getelementptr inbounds i8, i8* %dst, i64 %iv
+  store i8 %a3, i8* %dstptr
+  %ptr.next = getelementptr inbounds i8, i8* %ivptr, i64 30
+  %cmp = icmp eq i64 %inc, 100
+  br i1 %cmp, label %for.end, label %for.body
+
+for.end:
+  ret void
+}
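
To look at the reported costs locally (assuming an assertions-enabled build,
since the test states REQUIRES: asserts), the RUN line can be executed
directly, e.g.:

  opt -mtriple=s390x-unknown-linux -mcpu=z13 -loop-vectorize \
    -debug-only=loop-vectorize,vectorutils -max-interleave-group-factor=64 \
    -disable-output < mem-interleaving-costs-02.ll

which prints the "LV: Found an estimated cost ..." lines to stderr.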



