[llvm] 03756a4 - [ARM][MVE] Combine more extending masked loads

Sam Parker via llvm-commits llvm-commits at lists.llvm.org
Sun Feb 23 23:50:35 PST 2020


Author: Sam Parker
Date: 2020-02-24T07:50:15Z
New Revision: 03756a41979915c63d0a9eb214524e03aaff876d

URL: https://github.com/llvm/llvm-project/commit/03756a41979915c63d0a9eb214524e03aaff876d
DIFF: https://github.com/llvm/llvm-project/commit/03756a41979915c63d0a9eb214524e03aaff876d.diff

LOG: [ARM][MVE] Combine more extending masked loads

For MVE, don't look at the users of the extending loads, so that more
of them are reported as desirable for folding.

Differential Revision: https://reviews.llvm.org/D74958
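
The new test exercises sign- and zero-extending masked loads of i8 and
i16 elements. As a rough illustration (an assumption, not part of the
commit), the sext_i8 case corresponds to tail-predicated vectorization
of a scalar loop like the one below, where the i8 load and the sign
extension are expected to be selected as a single vldrb.s16:

    // Hedged sketch of the source loop behind the sext_i8 test (assumed,
    // not taken from the commit). Auto-vectorizing this with MVE tail
    // predication yields a masked <8 x i8> load followed by a sext to
    // <8 x i16>, which the test expects to fold into one vldrb.s16.
    void sext_i8(short *a, const signed char *b, int N) {
      for (int i = 0; i < N; ++i)
        a[i] += b[i]; // i8 loaded, sign-extended to i16, added to a[i]
    }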

Added: 
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll

Modified: 
    llvm/lib/Target/ARM/ARMISelLowering.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 7a91f2df6dcd..55336dda7364 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -15323,6 +15323,9 @@ bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
       return false;
   }
 
+  if (Subtarget->hasMVEIntegerOps())
+    return true;
+
   // Don't create a loadext if we can fold the extension into a wide/long
   // instruction.
   // If there's more than one user instruction, the loadext is desirable no

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll
new file mode 100644
index 000000000000..4512abb9584f
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/extending-loads.ll
@@ -0,0 +1,229 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -disable-mve-tail-predication=false %s -o - | FileCheck %s
+
+define dso_local arm_aapcs_vfpcc void @sext_i8(i16* noalias nocapture %a, i8* nocapture readonly %b, i32 %N) {
+; CHECK-LABEL: sext_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    popeq {r7, pc}
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    dlstp.16 lr, r2
+; CHECK:       .LBB0_1: @ %vector.body
+; CHECK:         vldrb.s16 q0, [r1], #8
+; CHECK-NEXT:    vldrh.u16 q1, [r3], #16
+; CHECK-NEXT:    vadd.i16 q0, q1, q0
+; CHECK-NEXT:    vstrh.16 q0, [r0]
+; CHECK-NEXT:    mov r0, r3
+; CHECK-NEXT:    letp lr, .LBB0_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp8 = icmp eq i32 %N, 0
+  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 7
+  %n.vec = and i32 %n.rnd.up, -8
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert10 = insertelement <8 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat11 = shufflevector <8 x i32> %broadcast.splatinsert10, <8 x i32> undef, <8 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <8 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <8 x i32> %broadcast.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
+  %induction = or <8 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %0 = getelementptr inbounds i8, i8* %b, i32 %index
+  %1 = icmp ule <8 x i32> %induction, %broadcast.splat11
+  %2 = bitcast i8* %0 to <8 x i8>*
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %2, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %3 = sext <8 x i8> %wide.masked.load to <8 x i16>
+  %4 = getelementptr inbounds i16, i16* %a, i32 %index
+  %5 = bitcast i16* %4 to <8 x i16>*
+  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %5, i32 2, <8 x i1> %1, <8 x i16> undef)
+  %6 = add <8 x i16> %wide.masked.load12, %3
+  %7 = bitcast i16* %4 to <8 x i16>*
+  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %6, <8 x i16>* %7, i32 2, <8 x i1> %1)
+  %index.next = add i32 %index, 8
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+; Function Attrs: nofree norecurse nounwind
+define dso_local arm_aapcs_vfpcc void @zext_i8(i16* noalias nocapture %a, i8* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+; CHECK-LABEL: zext_i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    popeq {r7, pc}
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    dlstp.16 lr, r2
+; CHECK:       .LBB1_1: @ %vector.body
+; CHECK:         vldrb.u16 q0, [r1], #8
+; CHECK-NEXT:    vldrh.u16 q1, [r3], #16
+; CHECK-NEXT:    vadd.i16 q0, q1, q0
+; CHECK-NEXT:    vstrh.16 q0, [r0]
+; CHECK-NEXT:    mov r0, r3
+; CHECK-NEXT:    letp lr, .LBB1_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp8 = icmp eq i32 %N, 0
+  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 7
+  %n.vec = and i32 %n.rnd.up, -8
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert10 = insertelement <8 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat11 = shufflevector <8 x i32> %broadcast.splatinsert10, <8 x i32> undef, <8 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <8 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <8 x i32> %broadcast.splatinsert, <8 x i32> undef, <8 x i32> zeroinitializer
+  %induction = or <8 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %0 = getelementptr inbounds i8, i8* %b, i32 %index
+  %1 = icmp ule <8 x i32> %induction, %broadcast.splat11
+  %2 = bitcast i8* %0 to <8 x i8>*
+  %wide.masked.load = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %2, i32 1, <8 x i1> %1, <8 x i8> undef)
+  %3 = zext <8 x i8> %wide.masked.load to <8 x i16>
+  %4 = getelementptr inbounds i16, i16* %a, i32 %index
+  %5 = bitcast i16* %4 to <8 x i16>*
+  %wide.masked.load12 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %5, i32 2, <8 x i1> %1, <8 x i16> undef)
+  %6 = add <8 x i16> %wide.masked.load12, %3
+  %7 = bitcast i16* %4 to <8 x i16>*
+  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %6, <8 x i16>* %7, i32 2, <8 x i1> %1)
+  %index.next = add i32 %index, 8
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+; Function Attrs: nofree norecurse nounwind
+define dso_local arm_aapcs_vfpcc void @sext_i16(i32* noalias nocapture %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+; CHECK-LABEL: sext_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    popeq {r7, pc}
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK:       .LBB2_1: @ %vector.body
+; CHECK:         vldrh.s32 q0, [r1], #8
+; CHECK-NEXT:    vldrw.u32 q1, [r3], #16
+; CHECK-NEXT:    vadd.i32 q0, q1, q0
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    mov r0, r3
+; CHECK-NEXT:    letp lr, .LBB2_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp eq i32 %N, 0
+  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert8 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat9 = shufflevector <4 x i32> %broadcast.splatinsert8, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = or <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i16, i16* %b, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat9
+  %2 = bitcast i16* %0 to <4 x i16>*
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = sext <4 x i16> %wide.masked.load to <4 x i32>
+  %4 = getelementptr inbounds i32, i32* %a, i32 %index
+  %5 = bitcast i32* %4 to <4 x i32>*
+  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %5, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %6 = add nsw <4 x i32> %wide.masked.load10, %3
+  %7 = bitcast i32* %4 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %7, i32 4, <4 x i1> %1)
+  %index.next = add i32 %index, 4
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+; Function Attrs: nofree norecurse nounwind
+define dso_local arm_aapcs_vfpcc void @zext_i16(i32* noalias nocapture %a, i16* nocapture readonly %b, i32 %N) local_unnamed_addr #0 {
+; CHECK-LABEL: zext_i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    cmp r2, #0
+; CHECK-NEXT:    it eq
+; CHECK-NEXT:    popeq {r7, pc}
+; CHECK-NEXT:    mov r3, r0
+; CHECK-NEXT:    dlstp.32 lr, r2
+; CHECK:       .LBB3_1: @ %vector.body
+; CHECK:         vldrh.u32 q0, [r1], #8
+; CHECK-NEXT:    vldrw.u32 q1, [r3], #16
+; CHECK-NEXT:    vadd.i32 q0, q1, q0
+; CHECK-NEXT:    vstrw.32 q0, [r0]
+; CHECK-NEXT:    mov r0, r3
+; CHECK-NEXT:    letp lr, .LBB3_1
+; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
+; CHECK-NEXT:    pop {r7, pc}
+entry:
+  %cmp6 = icmp eq i32 %N, 0
+  br i1 %cmp6, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert8 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat9 = shufflevector <4 x i32> %broadcast.splatinsert8, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = or <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i16, i16* %b, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat9
+  %2 = bitcast i16* %0 to <4 x i16>*
+  %wide.masked.load = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %2, i32 2, <4 x i1> %1, <4 x i16> undef)
+  %3 = zext <4 x i16> %wide.masked.load to <4 x i32>
+  %4 = getelementptr inbounds i32, i32* %a, i32 %index
+  %5 = bitcast i32* %4 to <4 x i32>*
+  %wide.masked.load10 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %5, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %6 = add <4 x i32> %wide.masked.load10, %3
+  %7 = bitcast i32* %4 to <4 x i32>*
+  call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %6, <4 x i32>* %7, i32 4, <4 x i1> %1)
+  %index.next = add i32 %index, 4
+  %8 = icmp eq i32 %index.next, %n.vec
+  br i1 %8, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body, %entry
+  ret void
+}
+
+declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32 immarg, <8 x i1>, <8 x i8>)
+declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32 immarg, <8 x i1>, <8 x i16>)
+declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32 immarg, <8 x i1>)
+declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32 immarg, <4 x i1>, <4 x i16>)
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+
