[llvm] bae6f8f - [ARM] Add some tests for non-zero VCTP generation. NFC
David Green via llvm-commits
llvm-commits at lists.llvm.org
Sat Mar 25 16:33:30 PDT 2023
Author: David Green
Date: 2023-03-25T23:33:25Z
New Revision: bae6f8f95cae65ebe7c6f5008d7054aa86004cbe
URL: https://github.com/llvm/llvm-project/commit/bae6f8f95cae65ebe7c6f5008d7054aa86004cbe
DIFF: https://github.com/llvm/llvm-project/commit/bae6f8f95cae65ebe7c6f5008d7054aa86004cbe.diff
LOG: [ARM] Add some tests for non-zero VCTP generation. NFC
See D146517.
Added:
llvm/test/CodeGen/Thumb2/mve-tailpred-nonzerostart.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-tailpred-nonzerostart.ll b/llvm/test/CodeGen/Thumb2/mve-tailpred-nonzerostart.ll
new file mode 100644
index 0000000000000..4491d9c2761fb
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-tailpred-nonzerostart.ll
@@ -0,0 +1,291 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
+
+; start12: tail-predicated fma loop whose induction variable starts at 12
+; (a multiple of the 4-element vector width) instead of 0, so
+; llvm.get.active.lane.mask is fed a non-zero start index.  The CHECK lines
+; show the current codegen: the lane mask lowers to a vqadd/vpt compare
+; sequence rather than a VCTP (see D146517 in the commit log).
+; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
+; regenerate them rather than hand-editing.
+define arm_aapcs_vfpcc void @start12(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
+; CHECK-LABEL: start12:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: cmp r3, #1
+; CHECK-NEXT: blt .LBB0_3
+; CHECK-NEXT: @ %bb.1: @ %vector.ph
+; CHECK-NEXT: vmov r12, s0
+; CHECK-NEXT: adds r4, r3, #3
+; CHECK-NEXT: bic r4, r4, #3
+; CHECK-NEXT: adr r5, .LCPI0_0
+; CHECK-NEXT: sub.w lr, r4, #16
+; CHECK-NEXT: movs r4, #1
+; CHECK-NEXT: adds r0, #48
+; CHECK-NEXT: adds r1, #48
+; CHECK-NEXT: add.w lr, r4, lr, lsr #2
+; CHECK-NEXT: adds r2, #48
+; CHECK-NEXT: vldrw.u32 q0, [r5]
+; CHECK-NEXT: movs r4, #12
+; CHECK-NEXT: vdup.32 q1, r3
+; CHECK-NEXT: .LBB0_2: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vqadd.u32 q2, q0, r4
+; CHECK-NEXT: adds r4, #4
+; CHECK-NEXT: vptt.u32 hi, q1, q2
+; CHECK-NEXT: vldrwt.u32 q2, [r1], #16
+; CHECK-NEXT: vldrwt.u32 q3, [r0], #16
+; CHECK-NEXT: vfmas.f32 q3, q2, r12
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vstrwt.32 q3, [r2], #16
+; CHECK-NEXT: le lr, .LBB0_2
+; CHECK-NEXT: .LBB0_3: @ %for.cond.cleanup
+; CHECK-NEXT: pop {r4, r5, r7, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.4:
+; CHECK-NEXT: .LCPI0_0:
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 1 @ 0x1
+; CHECK-NEXT: .long 2 @ 0x2
+; CHECK-NEXT: .long 3 @ 0x3
+entry:
+ ; Loop is only entered for a positive trip count.
+ %cmp8 = icmp sgt i32 %n, 0
+ br i1 %cmp8, label %vector.ph, label %for.cond.cleanup
+
+vector.ph: ; preds = %entry
+ ; Round %n up to a multiple of 4 for the vectorized trip count.
+ %n.rnd.up = add i32 %n, 3
+ %n.vec = and i32 %n.rnd.up, -4
+ %broadcast.splatinsert13 = insertelement <4 x float> undef, float %a, i32 0
+ %broadcast.splat14 = shufflevector <4 x float> %broadcast.splatinsert13, <4 x float> undef, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ ; Non-zero start under test: %index begins at 12, not 0.
+ %index = phi i32 [ 12, %vector.ph ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds float, ptr %x, i32 %index
+ ; Predicate the 4 lanes starting at %index against the scalar count %n.
+ %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
+ %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+ %2 = getelementptr inbounds float, ptr %y, i32 %index
+ %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+ %3 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %wide.masked.load12, <4 x float> %broadcast.splat14)
+ %4 = getelementptr inbounds float, ptr %z, i32 %index
+ call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %1)
+ %index.next = add i32 %index, 4
+ %5 = icmp eq i32 %index.next, %n.vec
+ br i1 %5, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+}
+
+
+; start11: as start12, but the induction variable starts at 11, which is
+; NOT a multiple of the 4-element vector width.  The checked asm offsets the
+; three pointers by 44 bytes (11 floats) and seeds the element counter with
+; #11; the lane mask is again lowered to vqadd/vpt compares, not a VCTP.
+; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
+; regenerate them rather than hand-editing.
+define arm_aapcs_vfpcc void @start11(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
+; CHECK-LABEL: start11:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: cmp r3, #1
+; CHECK-NEXT: blt .LBB1_3
+; CHECK-NEXT: @ %bb.1: @ %vector.ph
+; CHECK-NEXT: vmov r12, s0
+; CHECK-NEXT: adds r4, r3, #3
+; CHECK-NEXT: adr r5, .LCPI1_0
+; CHECK-NEXT: bic lr, r4, #3
+; CHECK-NEXT: adds r0, #44
+; CHECK-NEXT: adds r1, #44
+; CHECK-NEXT: adds r2, #44
+; CHECK-NEXT: vldrw.u32 q0, [r5]
+; CHECK-NEXT: movs r4, #11
+; CHECK-NEXT: vdup.32 q1, r3
+; CHECK-NEXT: .LBB1_2: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vqadd.u32 q2, q0, r4
+; CHECK-NEXT: adds r4, #4
+; CHECK-NEXT: cmp lr, r4
+; CHECK-NEXT: vptt.u32 hi, q1, q2
+; CHECK-NEXT: vldrwt.u32 q2, [r1], #16
+; CHECK-NEXT: vldrwt.u32 q3, [r0], #16
+; CHECK-NEXT: vfmas.f32 q3, q2, r12
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vstrwt.32 q3, [r2], #16
+; CHECK-NEXT: bne .LBB1_2
+; CHECK-NEXT: .LBB1_3: @ %for.cond.cleanup
+; CHECK-NEXT: pop {r4, r5, r7, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.4:
+; CHECK-NEXT: .LCPI1_0:
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 1 @ 0x1
+; CHECK-NEXT: .long 2 @ 0x2
+; CHECK-NEXT: .long 3 @ 0x3
+entry:
+ ; Loop is only entered for a positive trip count.
+ %cmp8 = icmp sgt i32 %n, 0
+ br i1 %cmp8, label %vector.ph, label %for.cond.cleanup
+
+vector.ph: ; preds = %entry
+ ; Round %n up to a multiple of 4 for the vectorized trip count.
+ %n.rnd.up = add i32 %n, 3
+ %n.vec = and i32 %n.rnd.up, -4
+ %broadcast.splatinsert13 = insertelement <4 x float> undef, float %a, i32 0
+ %broadcast.splat14 = shufflevector <4 x float> %broadcast.splatinsert13, <4 x float> undef, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ ; Non-zero, non-VF-aligned start under test: %index begins at 11.
+ %index = phi i32 [ 11, %vector.ph ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds float, ptr %x, i32 %index
+ ; Predicate the 4 lanes starting at %index against the scalar count %n.
+ %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
+ %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+ %2 = getelementptr inbounds float, ptr %y, i32 %index
+ %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+ %3 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %wide.masked.load12, <4 x float> %broadcast.splat14)
+ %4 = getelementptr inbounds float, ptr %z, i32 %index
+ call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %1)
+ %index.next = add i32 %index, 4
+ %5 = icmp eq i32 %index.next, %n.vec
+ br i1 %5, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+}
+
+; startS: as above, but the induction variable starts at a runtime value %S
+; (first argument, r0 in the checked asm), so nothing is known about its
+; alignment relative to the vector width.  Pointers are offset by S*4 bytes
+; (lsl #2) and the lane mask again lowers to vqadd/vpt compares, not a VCTP.
+; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
+; regenerate them rather than hand-editing.
+define arm_aapcs_vfpcc void @startS(i32 %S, ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
+; CHECK-LABEL: startS:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: ldr r5, [sp, #16]
+; CHECK-NEXT: cmp r5, #1
+; CHECK-NEXT: blt .LBB2_3
+; CHECK-NEXT: @ %bb.1: @ %vector.ph
+; CHECK-NEXT: vmov r12, s0
+; CHECK-NEXT: adds r4, r5, #3
+; CHECK-NEXT: bic lr, r4, #3
+; CHECK-NEXT: adr r4, .LCPI2_0
+; CHECK-NEXT: add.w r1, r1, r0, lsl #2
+; CHECK-NEXT: add.w r2, r2, r0, lsl #2
+; CHECK-NEXT: add.w r3, r3, r0, lsl #2
+; CHECK-NEXT: vldrw.u32 q0, [r4]
+; CHECK-NEXT: vdup.32 q1, r5
+; CHECK-NEXT: .LBB2_2: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vqadd.u32 q2, q0, r0
+; CHECK-NEXT: adds r0, #4
+; CHECK-NEXT: cmp lr, r0
+; CHECK-NEXT: vptt.u32 hi, q1, q2
+; CHECK-NEXT: vldrwt.u32 q2, [r2], #16
+; CHECK-NEXT: vldrwt.u32 q3, [r1], #16
+; CHECK-NEXT: vfmas.f32 q3, q2, r12
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vstrwt.32 q3, [r3], #16
+; CHECK-NEXT: bne .LBB2_2
+; CHECK-NEXT: .LBB2_3: @ %for.cond.cleanup
+; CHECK-NEXT: pop {r4, r5, r7, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.4:
+; CHECK-NEXT: .LCPI2_0:
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 1 @ 0x1
+; CHECK-NEXT: .long 2 @ 0x2
+; CHECK-NEXT: .long 3 @ 0x3
+entry:
+ ; Loop is only entered for a positive trip count.
+ %cmp8 = icmp sgt i32 %n, 0
+ br i1 %cmp8, label %vector.ph, label %for.cond.cleanup
+
+vector.ph: ; preds = %entry
+ ; Round %n up to a multiple of 4 for the vectorized trip count.
+ %n.rnd.up = add i32 %n, 3
+ %n.vec = and i32 %n.rnd.up, -4
+ %broadcast.splatinsert13 = insertelement <4 x float> undef, float %a, i32 0
+ %broadcast.splat14 = shufflevector <4 x float> %broadcast.splatinsert13, <4 x float> undef, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ ; Variable (runtime) start under test: %index begins at %S.
+ %index = phi i32 [ %S, %vector.ph ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds float, ptr %x, i32 %index
+ ; Predicate the 4 lanes starting at %index against the scalar count %n.
+ %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
+ %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+ %2 = getelementptr inbounds float, ptr %y, i32 %index
+ %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+ %3 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %wide.masked.load12, <4 x float> %broadcast.splat14)
+ %4 = getelementptr inbounds float, ptr %z, i32 %index
+ call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %1)
+ %index.next = add i32 %index, 4
+ %5 = icmp eq i32 %index.next, %n.vec
+ br i1 %5, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+}
+
+; startSmod4: as startS, but the runtime start %S is first rounded down to a
+; multiple of the vector width (and %S, -4), so the start is known to be
+; VF-aligned.  This lets the backend form a low-overhead loop (le), though
+; the lane mask still lowers to vqadd/vpt compares rather than a VCTP.
+; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
+; regenerate them rather than hand-editing.
+define arm_aapcs_vfpcc void @startSmod4(i32 %S, ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, float %a, i32 %n) {
+; CHECK-LABEL: startSmod4:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .save {r4, r5, r6, lr}
+; CHECK-NEXT: push {r4, r5, r6, lr}
+; CHECK-NEXT: ldr r6, [sp, #16]
+; CHECK-NEXT: cmp r6, #1
+; CHECK-NEXT: blt .LBB3_3
+; CHECK-NEXT: @ %bb.1: @ %vector.ph
+; CHECK-NEXT: vmov r12, s0
+; CHECK-NEXT: mvn r4, #12
+; CHECK-NEXT: and.w r4, r4, r0, lsl #2
+; CHECK-NEXT: bic r0, r0, #3
+; CHECK-NEXT: add r1, r4
+; CHECK-NEXT: add r2, r4
+; CHECK-NEXT: add r3, r4
+; CHECK-NEXT: adds r4, r6, #3
+; CHECK-NEXT: bic r4, r4, #3
+; CHECK-NEXT: movs r5, #1
+; CHECK-NEXT: subs r4, r4, r0
+; CHECK-NEXT: vdup.32 q1, r6
+; CHECK-NEXT: subs r4, #4
+; CHECK-NEXT: add.w lr, r5, r4, lsr #2
+; CHECK-NEXT: adr r4, .LCPI3_0
+; CHECK-NEXT: vldrw.u32 q0, [r4]
+; CHECK-NEXT: .LBB3_2: @ %vector.body
+; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vqadd.u32 q2, q0, r0
+; CHECK-NEXT: adds r0, #4
+; CHECK-NEXT: vptt.u32 hi, q1, q2
+; CHECK-NEXT: vldrwt.u32 q2, [r2], #16
+; CHECK-NEXT: vldrwt.u32 q3, [r1], #16
+; CHECK-NEXT: vfmas.f32 q3, q2, r12
+; CHECK-NEXT: vpst
+; CHECK-NEXT: vstrwt.32 q3, [r3], #16
+; CHECK-NEXT: le lr, .LBB3_2
+; CHECK-NEXT: .LBB3_3: @ %for.cond.cleanup
+; CHECK-NEXT: pop {r4, r5, r6, pc}
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ %bb.4:
+; CHECK-NEXT: .LCPI3_0:
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 1 @ 0x1
+; CHECK-NEXT: .long 2 @ 0x2
+; CHECK-NEXT: .long 3 @ 0x3
+entry:
+ ; Loop is only entered for a positive trip count.
+ %cmp8 = icmp sgt i32 %n, 0
+ br i1 %cmp8, label %vector.ph, label %for.cond.cleanup
+
+vector.ph: ; preds = %entry
+ ; Round the runtime start %S down to a multiple of the vector width (4).
+ %Sm = and i32 %S, -4
+ ; Round %n up to a multiple of 4 for the vectorized trip count.
+ %n.rnd.up = add i32 %n, 3
+ %n.vec = and i32 %n.rnd.up, -4
+ %broadcast.splatinsert13 = insertelement <4 x float> undef, float %a, i32 0
+ %broadcast.splat14 = shufflevector <4 x float> %broadcast.splatinsert13, <4 x float> undef, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ ; Runtime but VF-aligned start under test: %index begins at %Sm.
+ %index = phi i32 [ %Sm, %vector.ph ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds float, ptr %x, i32 %index
+ ; Predicate the 4 lanes starting at %index against the scalar count %n.
+ %1 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n)
+ %wide.masked.load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %0, i32 4, <4 x i1> %1, <4 x float> undef)
+ %2 = getelementptr inbounds float, ptr %y, i32 %index
+ %wide.masked.load12 = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %2, i32 4, <4 x i1> %1, <4 x float> undef)
+ %3 = call fast <4 x float> @llvm.fma.v4f32(<4 x float> %wide.masked.load, <4 x float> %wide.masked.load12, <4 x float> %broadcast.splat14)
+ %4 = getelementptr inbounds float, ptr %z, i32 %index
+ call void @llvm.masked.store.v4f32.p0(<4 x float> %3, ptr %4, i32 4, <4 x i1> %1)
+ %index.next = add i32 %index, 4
+ %5 = icmp eq i32 %index.next, %n.vec
+ br i1 %5, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+}
+
+
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr, i32 immarg, <4 x i1>)
+declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
+
More information about the llvm-commits
mailing list