[PATCH] D72440: [ARM][MVE] Don't unroll intrinsic loops.

Sam Parker via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 9 02:39:08 PST 2020


samparker created this revision.
samparker added reviewers: dmgreen, SjoerdMeijer.
Herald added subscribers: zzheng, hiraditya, kristof.beyls.
Herald added a project: LLVM.

We don't unroll vector loops for MVE targets, but we miss the case when a loop's only vector operations are intrinsic calls: the call-handling code in getUnrollingPreferences returns before the vector-type check is reached. So hoist the vector-type check above the call handling to catch this case.
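
For context, a simplified sketch of the reordered loop in
ARMTTIImpl::getUnrollingPreferences (the cost bookkeeping and the body of the
called-function check are elided here, just as they are in the hunks below):

  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Now checked first: any instruction producing a vector value,
      // including a call to a vector intrinsic such as llvm.arm.mve.*,
      // disables unrolling for the whole loop.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          // (body elided in the posted hunks)
        }
        return;
      }

      // ... accumulate Cost for the remaining instructions ...
    }
  }

Previously the isVectorTy() check sat below the call handling, so the early
return for calls meant a loop whose only vector operations were intrinsic
calls was still a candidate for unrolling.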


https://reviews.llvm.org/D72440

Files:
  llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
  llvm/test/Transforms/LoopUnroll/ARM/mve-nounroll.ll


Index: llvm/test/Transforms/LoopUnroll/ARM/mve-nounroll.ll
===================================================================
--- llvm/test/Transforms/LoopUnroll/ARM/mve-nounroll.ll
+++ llvm/test/Transforms/LoopUnroll/ARM/mve-nounroll.ll
@@ -121,6 +121,55 @@
   ret void
 }
 
+; Test that we don't unroll loops that only contain vector intrinsics.
+; CHECK-LABEL: test_intrinsics
+; CHECK: call <16 x i8> @llvm.arm.mve.sub
+; CHECK-NOT: call <16 x i8> @llvm.arm.mve.sub
+define dso_local arm_aapcs_vfpcc void @test_intrinsics(i8* noalias nocapture readonly %a, i8* noalias nocapture readonly %b, i8* noalias nocapture %c, i32 %N) {
+entry:
+  %cmp8 = icmp eq i32 %N, 0
+  %tmp8 = add i32 %N, 15
+  %tmp9 = lshr i32 %tmp8, 4
+  %tmp10 = shl nuw i32 %tmp9, 4
+  %tmp11 = add i32 %tmp10, -16
+  %tmp12 = lshr i32 %tmp11, 4
+  %tmp13 = add nuw nsw i32 %tmp12, 1
+  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:
+  br label %vector.body
+
+vector.body:
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %tmp14 = phi i32 [ %tmp13, %vector.ph ], [ %tmp15, %vector.body ]
+  %0 = phi i32 [ %N, %vector.ph ], [ %2, %vector.body ]
+  %tmp = getelementptr inbounds i8, i8* %a, i32 %index
+  %1 = call <16 x i1> @llvm.arm.mve.vctp8(i32 %0)
+  %2 = sub i32 %0, 16
+  %tmp2 = bitcast i8* %tmp to <16 x i8>*
+  %wide.masked.load = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp2, i32 4, <16 x i1> %1, <16 x i8> undef)
+  %tmp3 = getelementptr inbounds i8, i8* %b, i32 %index
+  %tmp4 = bitcast i8* %tmp3 to <16 x i8>*
+  %wide.masked.load2 = tail call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %tmp4, i32 4, <16 x i1> %1, <16 x i8> undef)
+  %sub = call <16 x i8> @llvm.arm.mve.sub.predicated.v16i8.v16i1(<16 x i8> %wide.masked.load2, <16 x i8> %wide.masked.load, <16 x i1> %1, <16 x i8> undef)
+  %tmp6 = getelementptr inbounds i8, i8* %c, i32 %index
+  %tmp7 = bitcast i8* %tmp6 to <16 x i8>*
+  tail call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %sub, <16 x i8>* %tmp7, i32 4, <16 x i1> %1)
+  %index.next = add i32 %index, 16
+  %tmp15 = sub i32 %tmp14, 1
+  %tmp16 = icmp ne i32 %tmp15, 0
+  br i1 %tmp16, label %vector.body, label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+declare <16 x i1> @llvm.arm.mve.vctp8(i32)
+declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
+declare <16 x i8> @llvm.arm.mve.sub.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>)
+declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
+
+
 !0 = distinct !{!0, !1}
 !1 = !{!"llvm.loop.isvectorized", i32 1}
 !2 = distinct !{!2, !3, !1}
Index: llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1235,6 +1235,11 @@
   unsigned Cost = 0;
   for (auto *BB : L->getBlocks()) {
     for (auto &I : *BB) {
+      // Don't unroll vectorised loops; MVE does not benefit from unrolling as
+      // much as scalar code does.
+      if (I.getType()->isVectorTy())
+        return;
+
       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
         ImmutableCallSite CS(&I);
         if (const Function *F = CS.getCalledFunction()) {
@@ -1243,10 +1248,6 @@
         }
         return;
       }
-      // Don't unroll vectorised loop. MVE does not benefit from it as much as
-      // scalar code.
-      if (I.getType()->isVectorTy())
-        return;
 
       SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                             I.value_op_end());
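
The RUN line of mve-nounroll.ll is outside the posted hunks; for reference, an
MVE unrolling test of this kind is typically driven by something along these
lines (illustrative only, not taken from the patch):

  opt -mtriple=thumbv8.1m.main -mattr=+mve -loop-unroll -S %s | FileCheck %s

The CHECK-LABEL/CHECK/CHECK-NOT lines above then verify that exactly one call
to @llvm.arm.mve.sub.predicated remains after the unroller runs.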

