[llvm] 437fa02 - [ARM] Add tests for over-sized mulh. NFC
David Green via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 2 02:46:45 PST 2025
Author: David Green
Date: 2025-12-02T10:46:39Z
New Revision: 437fa02c074221ddc635caf6261e056ce44f5178
URL: https://github.com/llvm/llvm-project/commit/437fa02c074221ddc635caf6261e056ce44f5178
DIFF: https://github.com/llvm/llvm-project/commit/437fa02c074221ddc635caf6261e056ce44f5178.diff
LOG: [ARM] Add tests for over-sized mulh. NFC
The double-sized v8i32 cases do OK, but the larger v16i32 cases do not currently get
converted to umulh.
Added:
Modified:
llvm/test/CodeGen/Thumb2/mve-vmulh.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
index 32648b6b449a8..37f5e26c6e5a0 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmulh.ll
@@ -71,6 +71,203 @@ entry:
ret <4 x i32> %s2
}
+define arm_aapcs_vfpcc <8 x i32> @vmulhs_v8i32(<8 x i32> %s0, <8 x i32> %s1) {
+; CHECK-LABEL: vmulhs_v8i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmulh.s32 q0, q0, q2
+; CHECK-NEXT: vmulh.s32 q1, q1, q3
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = sext <8 x i32> %s0 to <8 x i64>
+ %s1s = sext <8 x i32> %s1 to <8 x i64>
+ %m = mul <8 x i64> %s0s, %s1s
+ %s = ashr <8 x i64> %m, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %s2 = trunc <8 x i64> %s to <8 x i32>
+ ret <8 x i32> %s2
+}
+
+define arm_aapcs_vfpcc <8 x i32> @vmulhu_v8i32(<8 x i32> %s0, <8 x i32> %s1) {
+; CHECK-LABEL: vmulhu_v8i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmulh.u32 q0, q0, q2
+; CHECK-NEXT: vmulh.u32 q1, q1, q3
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = zext <8 x i32> %s0 to <8 x i64>
+ %s1s = zext <8 x i32> %s1 to <8 x i64>
+ %m = mul <8 x i64> %s0s, %s1s
+ %s = lshr <8 x i64> %m, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %s2 = trunc <8 x i64> %s to <8 x i32>
+ ret <8 x i32> %s2
+}
+
+define arm_aapcs_vfpcc <16 x i32> @vmulhs_v16i32(<16 x i32> %s0, <16 x i32> %s1) {
+; CHECK-LABEL: vmulhs_v16i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d11, d12, d13, d14, d15}
+; CHECK-NEXT: .vsave {d9}
+; CHECK-NEXT: vpush {d9}
+; CHECK-NEXT: add r1, sp, #48
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vldrw.u32 q6, [r1]
+; CHECK-NEXT: vmov.f32 s18, s1
+; CHECK-NEXT: vmov.f32 s0, s2
+; CHECK-NEXT: vmov r1, s24
+; CHECK-NEXT: vmov.f32 s22, s25
+; CHECK-NEXT: vmov.f32 s2, s3
+; CHECK-NEXT: vmov.f32 s24, s26
+; CHECK-NEXT: vmov.f32 s26, s27
+; CHECK-NEXT: vmullb.s32 q7, q0, q6
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: vmov r1, s29
+; CHECK-NEXT: vmov q0[2], q0[0], r0, r1
+; CHECK-NEXT: vmov r0, s18
+; CHECK-NEXT: vmov r1, s22
+; CHECK-NEXT: vmov.f32 s18, s5
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: vmov r1, s31
+; CHECK-NEXT: vmov q0[3], q0[1], r0, r1
+; CHECK-NEXT: add r1, sp, #64
+; CHECK-NEXT: vldrw.u32 q6, [r1]
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmov.f32 s4, s6
+; CHECK-NEXT: vmov r1, s24
+; CHECK-NEXT: vmov.f32 s22, s25
+; CHECK-NEXT: vmov.f32 s6, s7
+; CHECK-NEXT: vmov.f32 s24, s26
+; CHECK-NEXT: vmov.f32 s26, s27
+; CHECK-NEXT: vmullb.s32 q7, q1, q6
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: vmov r1, s29
+; CHECK-NEXT: vmov q1[2], q1[0], r0, r1
+; CHECK-NEXT: vmov r0, s18
+; CHECK-NEXT: vmov r1, s22
+; CHECK-NEXT: vmov.f32 s18, s9
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: vmov r1, s31
+; CHECK-NEXT: vmov q1[3], q1[1], r0, r1
+; CHECK-NEXT: add r1, sp, #80
+; CHECK-NEXT: vldrw.u32 q6, [r1]
+; CHECK-NEXT: vmov r0, s8
+; CHECK-NEXT: vmov.f32 s8, s10
+; CHECK-NEXT: vmov r1, s24
+; CHECK-NEXT: vmov.f32 s22, s25
+; CHECK-NEXT: vmov.f32 s10, s11
+; CHECK-NEXT: vmov.f32 s24, s26
+; CHECK-NEXT: vmov.f32 s26, s27
+; CHECK-NEXT: vmullb.s32 q7, q2, q6
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: vmov r1, s29
+; CHECK-NEXT: vmov q2[2], q2[0], r0, r1
+; CHECK-NEXT: vmov r0, s18
+; CHECK-NEXT: vmov r1, s22
+; CHECK-NEXT: vmov.f32 s18, s13
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: vmov r1, s31
+; CHECK-NEXT: vmov q2[3], q2[1], r0, r1
+; CHECK-NEXT: add r1, sp, #96
+; CHECK-NEXT: vldrw.u32 q6, [r1]
+; CHECK-NEXT: vmov r0, s12
+; CHECK-NEXT: vmov.f32 s12, s14
+; CHECK-NEXT: vmov r1, s24
+; CHECK-NEXT: vmov.f32 s22, s25
+; CHECK-NEXT: vmov.f32 s14, s15
+; CHECK-NEXT: vmov.f32 s24, s26
+; CHECK-NEXT: vmov.f32 s26, s27
+; CHECK-NEXT: vmullb.s32 q7, q3, q6
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: vmov r1, s29
+; CHECK-NEXT: vmov q3[2], q3[0], r0, r1
+; CHECK-NEXT: vmov r0, s18
+; CHECK-NEXT: vmov r1, s22
+; CHECK-NEXT: smmul r0, r0, r1
+; CHECK-NEXT: vmov r1, s31
+; CHECK-NEXT: vmov q3[3], q3[1], r0, r1
+; CHECK-NEXT: vpop {d9}
+; CHECK-NEXT: vpop {d11, d12, d13, d14, d15}
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = sext <16 x i32> %s0 to <16 x i64>
+ %s1s = sext <16 x i32> %s1 to <16 x i64>
+ %m = mul <16 x i64> %s0s, %s1s
+ %s = ashr <16 x i64> %m, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %s2 = trunc <16 x i64> %s to <16 x i32>
+ ret <16 x i32> %s2
+}
+
+define arm_aapcs_vfpcc <16 x i32> @vmulhu_v16i32(<16 x i32> %s0, <16 x i32> %s1) {
+; CHECK-LABEL: vmulhu_v16i32:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: add r0, sp, #64
+; CHECK-NEXT: vmov.f32 s24, s2
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vmov.f32 s26, s3
+; CHECK-NEXT: vmov.f32 s2, s1
+; CHECK-NEXT: add r0, sp, #80
+; CHECK-NEXT: vmov.f32 s28, s18
+; CHECK-NEXT: vmov.f32 s30, s19
+; CHECK-NEXT: vmov.f32 s18, s17
+; CHECK-NEXT: vmullb.u32 q5, q6, q7
+; CHECK-NEXT: vmullb.u32 q6, q0, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vmov.f32 s0, s25
+; CHECK-NEXT: add r0, sp, #96
+; CHECK-NEXT: vmov.f32 s1, s27
+; CHECK-NEXT: vmov.f32 s24, s6
+; CHECK-NEXT: vmov.f32 s26, s7
+; CHECK-NEXT: vmov.f32 s28, s18
+; CHECK-NEXT: vmov.f32 s30, s19
+; CHECK-NEXT: vmov.f32 s6, s5
+; CHECK-NEXT: vmov.f32 s18, s17
+; CHECK-NEXT: vmov.f32 s2, s21
+; CHECK-NEXT: vmov.f32 s3, s23
+; CHECK-NEXT: vmullb.u32 q5, q6, q7
+; CHECK-NEXT: vmullb.u32 q6, q1, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vmov.f32 s4, s25
+; CHECK-NEXT: add r0, sp, #112
+; CHECK-NEXT: vmov.f32 s5, s27
+; CHECK-NEXT: vmov.f32 s24, s10
+; CHECK-NEXT: vmov.f32 s26, s11
+; CHECK-NEXT: vmov.f32 s28, s18
+; CHECK-NEXT: vmov.f32 s30, s19
+; CHECK-NEXT: vmov.f32 s10, s9
+; CHECK-NEXT: vmov.f32 s18, s17
+; CHECK-NEXT: vmov.f32 s6, s21
+; CHECK-NEXT: vmov.f32 s7, s23
+; CHECK-NEXT: vmullb.u32 q5, q6, q7
+; CHECK-NEXT: vmullb.u32 q6, q2, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vmov.f32 s8, s25
+; CHECK-NEXT: vmov.f32 s9, s27
+; CHECK-NEXT: vmov.f32 s24, s14
+; CHECK-NEXT: vmov.f32 s26, s15
+; CHECK-NEXT: vmov.f32 s28, s18
+; CHECK-NEXT: vmov.f32 s30, s19
+; CHECK-NEXT: vmov.f32 s14, s13
+; CHECK-NEXT: vmov.f32 s18, s17
+; CHECK-NEXT: vmov.f32 s10, s21
+; CHECK-NEXT: vmov.f32 s11, s23
+; CHECK-NEXT: vmullb.u32 q5, q6, q7
+; CHECK-NEXT: vmullb.u32 q6, q3, q4
+; CHECK-NEXT: vmov.f32 s14, s21
+; CHECK-NEXT: vmov.f32 s12, s25
+; CHECK-NEXT: vmov.f32 s13, s27
+; CHECK-NEXT: vmov.f32 s15, s23
+; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = zext <16 x i32> %s0 to <16 x i64>
+ %s1s = zext <16 x i32> %s1 to <16 x i64>
+ %m = mul <16 x i64> %s0s, %s1s
+ %s = lshr <16 x i64> %m, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+ %s2 = trunc <16 x i64> %s to <16 x i32>
+ ret <16 x i32> %s2
+}
+
define arm_aapcs_vfpcc <4 x i16> @vmulhs_v4i16(<4 x i16> %s0, <4 x i16> %s1) {
; CHECK-LABEL: vmulhs_v4i16:
; CHECK: @ %bb.0: @ %entry
@@ -129,6 +326,124 @@ entry:
ret <8 x i16> %s2
}
+define arm_aapcs_vfpcc <16 x i16> @vmulhs_v16i16(<16 x i16> %s0, <16 x i16> %s1) {
+; CHECK-LABEL: vmulhs_v16i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmulh.s16 q0, q0, q2
+; CHECK-NEXT: vmulh.s16 q1, q1, q3
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = sext <16 x i16> %s0 to <16 x i32>
+ %s1s = sext <16 x i16> %s1 to <16 x i32>
+ %m = mul <16 x i32> %s0s, %s1s
+ %s = ashr <16 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %s2 = trunc <16 x i32> %s to <16 x i16>
+ ret <16 x i16> %s2
+}
+
+define arm_aapcs_vfpcc <16 x i16> @vmulhu_v16i16(<16 x i16> %s0, <16 x i16> %s1) {
+; CHECK-LABEL: vmulhu_v16i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmulh.u16 q0, q0, q2
+; CHECK-NEXT: vmulh.u16 q1, q1, q3
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = zext <16 x i16> %s0 to <16 x i32>
+ %s1s = zext <16 x i16> %s1 to <16 x i32>
+ %m = mul <16 x i32> %s0s, %s1s
+ %s = lshr <16 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %s2 = trunc <16 x i32> %s to <16 x i16>
+ ret <16 x i16> %s2
+}
+
+define arm_aapcs_vfpcc <32 x i16> @vmulhs_v32i16(<32 x i16> %s0, <32 x i16> %s1) {
+; CHECK-LABEL: vmulhs_v32i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: add r0, sp, #32
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: add r0, sp, #48
+; CHECK-NEXT: vmullt.s16 q5, q0, q4
+; CHECK-NEXT: vmullb.s16 q0, q0, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u32 q5, q5, #16
+; CHECK-NEXT: vshr.u32 q0, q0, #16
+; CHECK-NEXT: add r0, sp, #64
+; CHECK-NEXT: vmovnt.i32 q0, q5
+; CHECK-NEXT: vmullt.s16 q5, q1, q4
+; CHECK-NEXT: vmullb.s16 q1, q1, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u32 q5, q5, #16
+; CHECK-NEXT: vshr.u32 q1, q1, #16
+; CHECK-NEXT: add r0, sp, #80
+; CHECK-NEXT: vmovnt.i32 q1, q5
+; CHECK-NEXT: vmullt.s16 q5, q2, q4
+; CHECK-NEXT: vmullb.s16 q2, q2, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u32 q5, q5, #16
+; CHECK-NEXT: vshr.u32 q2, q2, #16
+; CHECK-NEXT: vmovnt.i32 q2, q5
+; CHECK-NEXT: vmullt.s16 q5, q3, q4
+; CHECK-NEXT: vmullb.s16 q3, q3, q4
+; CHECK-NEXT: vshr.u32 q5, q5, #16
+; CHECK-NEXT: vshr.u32 q3, q3, #16
+; CHECK-NEXT: vmovnt.i32 q3, q5
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = sext <32 x i16> %s0 to <32 x i32>
+ %s1s = sext <32 x i16> %s1 to <32 x i32>
+ %m = mul <32 x i32> %s0s, %s1s
+ %s = ashr <32 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %s2 = trunc <32 x i32> %s to <32 x i16>
+ ret <32 x i16> %s2
+}
+
+define arm_aapcs_vfpcc <32 x i16> @vmulhu_v32i16(<32 x i16> %s0, <32 x i16> %s1) {
+; CHECK-LABEL: vmulhu_v32i16:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: add r0, sp, #32
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: add r0, sp, #48
+; CHECK-NEXT: vmullt.u16 q5, q0, q4
+; CHECK-NEXT: vmullb.u16 q0, q0, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u32 q5, q5, #16
+; CHECK-NEXT: vshr.u32 q0, q0, #16
+; CHECK-NEXT: add r0, sp, #64
+; CHECK-NEXT: vmovnt.i32 q0, q5
+; CHECK-NEXT: vmullt.u16 q5, q1, q4
+; CHECK-NEXT: vmullb.u16 q1, q1, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u32 q5, q5, #16
+; CHECK-NEXT: vshr.u32 q1, q1, #16
+; CHECK-NEXT: add r0, sp, #80
+; CHECK-NEXT: vmovnt.i32 q1, q5
+; CHECK-NEXT: vmullt.u16 q5, q2, q4
+; CHECK-NEXT: vmullb.u16 q2, q2, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u32 q5, q5, #16
+; CHECK-NEXT: vshr.u32 q2, q2, #16
+; CHECK-NEXT: vmovnt.i32 q2, q5
+; CHECK-NEXT: vmullt.u16 q5, q3, q4
+; CHECK-NEXT: vmullb.u16 q3, q3, q4
+; CHECK-NEXT: vshr.u32 q5, q5, #16
+; CHECK-NEXT: vshr.u32 q3, q3, #16
+; CHECK-NEXT: vmovnt.i32 q3, q5
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = zext <32 x i16> %s0 to <32 x i32>
+ %s1s = zext <32 x i16> %s1 to <32 x i32>
+ %m = mul <32 x i32> %s0s, %s1s
+ %s = lshr <32 x i32> %m, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+ %s2 = trunc <32 x i32> %s to <32 x i16>
+ ret <32 x i16> %s2
+}
+
define arm_aapcs_vfpcc <4 x i8> @vmulhs_v4i8(<4 x i8> %s0, <4 x i8> %s1) {
; CHECK-LABEL: vmulhs_v4i8:
; CHECK: @ %bb.0: @ %entry
@@ -224,19 +539,137 @@ entry:
ret <16 x i8> %s2
}
+define arm_aapcs_vfpcc <32 x i8> @vmulhs_v32i8(<32 x i8> %s0, <32 x i8> %s1) {
+; CHECK-LABEL: vmulhs_v32i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmulh.s8 q0, q0, q2
+; CHECK-NEXT: vmulh.s8 q1, q1, q3
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = sext <32 x i8> %s0 to <32 x i16>
+ %s1s = sext <32 x i8> %s1 to <32 x i16>
+ %m = mul <32 x i16> %s0s, %s1s
+ %s = ashr <32 x i16> %m, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %s2 = trunc <32 x i16> %s to <32 x i8>
+ ret <32 x i8> %s2
+}
+
+define arm_aapcs_vfpcc <32 x i8> @vmulhu_v32i8(<32 x i8> %s0, <32 x i8> %s1) {
+; CHECK-LABEL: vmulhu_v32i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmulh.u8 q0, q0, q2
+; CHECK-NEXT: vmulh.u8 q1, q1, q3
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = zext <32 x i8> %s0 to <32 x i16>
+ %s1s = zext <32 x i8> %s1 to <32 x i16>
+ %m = mul <32 x i16> %s0s, %s1s
+ %s = lshr <32 x i16> %m, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %s2 = trunc <32 x i16> %s to <32 x i8>
+ ret <32 x i8> %s2
+}
+
+define arm_aapcs_vfpcc <64 x i8> @vmulhs_v64i8(<64 x i8> %s0, <64 x i8> %s1) {
+; CHECK-LABEL: vmulhs_v64i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: add r0, sp, #32
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: add r0, sp, #48
+; CHECK-NEXT: vmullt.s8 q5, q0, q4
+; CHECK-NEXT: vmullb.s8 q0, q0, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u16 q5, q5, #8
+; CHECK-NEXT: vshr.u16 q0, q0, #8
+; CHECK-NEXT: add r0, sp, #64
+; CHECK-NEXT: vmovnt.i16 q0, q5
+; CHECK-NEXT: vmullt.s8 q5, q1, q4
+; CHECK-NEXT: vmullb.s8 q1, q1, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u16 q5, q5, #8
+; CHECK-NEXT: vshr.u16 q1, q1, #8
+; CHECK-NEXT: add r0, sp, #80
+; CHECK-NEXT: vmovnt.i16 q1, q5
+; CHECK-NEXT: vmullt.s8 q5, q2, q4
+; CHECK-NEXT: vmullb.s8 q2, q2, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u16 q5, q5, #8
+; CHECK-NEXT: vshr.u16 q2, q2, #8
+; CHECK-NEXT: vmovnt.i16 q2, q5
+; CHECK-NEXT: vmullt.s8 q5, q3, q4
+; CHECK-NEXT: vmullb.s8 q3, q3, q4
+; CHECK-NEXT: vshr.u16 q5, q5, #8
+; CHECK-NEXT: vshr.u16 q3, q3, #8
+; CHECK-NEXT: vmovnt.i16 q3, q5
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = sext <64 x i8> %s0 to <64 x i16>
+ %s1s = sext <64 x i8> %s1 to <64 x i16>
+ %m = mul <64 x i16> %s0s, %s1s
+ %s = ashr <64 x i16> %m, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %s2 = trunc <64 x i16> %s to <64 x i8>
+ ret <64 x i8> %s2
+}
+
+define arm_aapcs_vfpcc <64 x i8> @vmulhu_v64i8(<64 x i8> %s0, <64 x i8> %s1) {
+; CHECK-LABEL: vmulhu_v64i8:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: add r0, sp, #32
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: add r0, sp, #48
+; CHECK-NEXT: vmullt.u8 q5, q0, q4
+; CHECK-NEXT: vmullb.u8 q0, q0, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u16 q5, q5, #8
+; CHECK-NEXT: vshr.u16 q0, q0, #8
+; CHECK-NEXT: add r0, sp, #64
+; CHECK-NEXT: vmovnt.i16 q0, q5
+; CHECK-NEXT: vmullt.u8 q5, q1, q4
+; CHECK-NEXT: vmullb.u8 q1, q1, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u16 q5, q5, #8
+; CHECK-NEXT: vshr.u16 q1, q1, #8
+; CHECK-NEXT: add r0, sp, #80
+; CHECK-NEXT: vmovnt.i16 q1, q5
+; CHECK-NEXT: vmullt.u8 q5, q2, q4
+; CHECK-NEXT: vmullb.u8 q2, q2, q4
+; CHECK-NEXT: vldrw.u32 q4, [r0]
+; CHECK-NEXT: vshr.u16 q5, q5, #8
+; CHECK-NEXT: vshr.u16 q2, q2, #8
+; CHECK-NEXT: vmovnt.i16 q2, q5
+; CHECK-NEXT: vmullt.u8 q5, q3, q4
+; CHECK-NEXT: vmullb.u8 q3, q3, q4
+; CHECK-NEXT: vshr.u16 q5, q5, #8
+; CHECK-NEXT: vshr.u16 q3, q3, #8
+; CHECK-NEXT: vmovnt.i16 q3, q5
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
+; CHECK-NEXT: bx lr
+entry:
+ %s0s = zext <64 x i8> %s0 to <64 x i16>
+ %s1s = zext <64 x i8> %s1 to <64 x i16>
+ %m = mul <64 x i16> %s0s, %s1s
+ %s = lshr <64 x i16> %m, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %s2 = trunc <64 x i16> %s to <64 x i8>
+ ret <64 x i8> %s2
+}
+
define void @vmulh_s8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
; CHECK-LABEL: vmulh_s8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #64
-; CHECK-NEXT: .LBB14_1: @ %vector.body
+; CHECK-NEXT: .LBB26_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrb.u8 q0, [r0], #16
; CHECK-NEXT: vldrb.u8 q1, [r1], #16
; CHECK-NEXT: vmulh.s8 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
-; CHECK-NEXT: le lr, .LBB14_1
+; CHECK-NEXT: le lr, .LBB26_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -269,13 +702,13 @@ define void @vmulh_s16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #128
-; CHECK-NEXT: .LBB15_1: @ %vector.body
+; CHECK-NEXT: .LBB27_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrh.u16 q0, [r0], #16
; CHECK-NEXT: vldrh.u16 q1, [r1], #16
; CHECK-NEXT: vmulh.s16 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
-; CHECK-NEXT: le lr, .LBB15_1
+; CHECK-NEXT: le lr, .LBB27_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -308,13 +741,13 @@ define void @vmulh_s32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #256
-; CHECK-NEXT: .LBB16_1: @ %vector.body
+; CHECK-NEXT: .LBB28_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrw.u32 q0, [r0], #16
; CHECK-NEXT: vldrw.u32 q1, [r1], #16
; CHECK-NEXT: vmulh.s32 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
-; CHECK-NEXT: le lr, .LBB16_1
+; CHECK-NEXT: le lr, .LBB28_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -347,13 +780,13 @@ define void @vmulh_u8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #64
-; CHECK-NEXT: .LBB17_1: @ %vector.body
+; CHECK-NEXT: .LBB29_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrb.u8 q0, [r0], #16
; CHECK-NEXT: vldrb.u8 q1, [r1], #16
; CHECK-NEXT: vmulh.u8 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
-; CHECK-NEXT: le lr, .LBB17_1
+; CHECK-NEXT: le lr, .LBB29_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -386,13 +819,13 @@ define void @vmulh_u16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #128
-; CHECK-NEXT: .LBB18_1: @ %vector.body
+; CHECK-NEXT: .LBB30_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrh.u16 q0, [r0], #16
; CHECK-NEXT: vldrh.u16 q1, [r1], #16
; CHECK-NEXT: vmulh.u16 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
-; CHECK-NEXT: le lr, .LBB18_1
+; CHECK-NEXT: le lr, .LBB30_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -425,13 +858,13 @@ define void @vmulh_u32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #256
-; CHECK-NEXT: .LBB19_1: @ %vector.body
+; CHECK-NEXT: .LBB31_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrw.u32 q0, [r0], #16
; CHECK-NEXT: vldrw.u32 q1, [r1], #16
; CHECK-NEXT: vmulh.u32 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r2], #16
-; CHECK-NEXT: le lr, .LBB19_1
+; CHECK-NEXT: le lr, .LBB31_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -467,15 +900,15 @@ define void @vmulh_s32_pred(ptr noalias nocapture %d, ptr noalias nocapture read
; CHECK-NEXT: cmp r3, #1
; CHECK-NEXT: it lt
; CHECK-NEXT: poplt {r7, pc}
-; CHECK-NEXT: .LBB20_1: @ %vector.ph
+; CHECK-NEXT: .LBB32_1: @ %vector.ph
; CHECK-NEXT: dlstp.32 lr, r3
-; CHECK-NEXT: .LBB20_2: @ %vector.body
+; CHECK-NEXT: .LBB32_2: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrw.u32 q0, [r1], #16
; CHECK-NEXT: vldrw.u32 q1, [r2], #16
; CHECK-NEXT: vmulh.s32 q0, q1, q0
; CHECK-NEXT: vstrw.32 q0, [r0], #16
-; CHECK-NEXT: letp lr, .LBB20_2
+; CHECK-NEXT: letp lr, .LBB32_2
; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -517,15 +950,15 @@ define void @vmulh_u32_pred(ptr noalias nocapture %d, ptr noalias nocapture read
; CHECK-NEXT: cmp r3, #1
; CHECK-NEXT: it lt
; CHECK-NEXT: poplt {r7, pc}
-; CHECK-NEXT: .LBB21_1: @ %vector.ph
+; CHECK-NEXT: .LBB33_1: @ %vector.ph
; CHECK-NEXT: dlstp.32 lr, r3
-; CHECK-NEXT: .LBB21_2: @ %vector.body
+; CHECK-NEXT: .LBB33_2: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrw.u32 q0, [r1], #16
; CHECK-NEXT: vldrw.u32 q1, [r2], #16
; CHECK-NEXT: vmulh.u32 q0, q1, q0
; CHECK-NEXT: vstrw.32 q0, [r0], #16
-; CHECK-NEXT: letp lr, .LBB21_2
+; CHECK-NEXT: letp lr, .LBB33_2
; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -567,15 +1000,15 @@ define void @vmulh_s16_pred(ptr noalias nocapture %d, ptr noalias nocapture read
; CHECK-NEXT: cmp r3, #1
; CHECK-NEXT: it lt
; CHECK-NEXT: poplt {r7, pc}
-; CHECK-NEXT: .LBB22_1: @ %vector.ph
+; CHECK-NEXT: .LBB34_1: @ %vector.ph
; CHECK-NEXT: dlstp.16 lr, r3
-; CHECK-NEXT: .LBB22_2: @ %vector.body
+; CHECK-NEXT: .LBB34_2: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrh.u16 q0, [r1], #16
; CHECK-NEXT: vldrh.u16 q1, [r2], #16
; CHECK-NEXT: vmulh.s16 q0, q1, q0
; CHECK-NEXT: vstrh.16 q0, [r0], #16
-; CHECK-NEXT: letp lr, .LBB22_2
+; CHECK-NEXT: letp lr, .LBB34_2
; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -617,15 +1050,15 @@ define void @vmulh_u16_pred(ptr noalias nocapture %d, ptr noalias nocapture read
; CHECK-NEXT: cmp r3, #1
; CHECK-NEXT: it lt
; CHECK-NEXT: poplt {r7, pc}
-; CHECK-NEXT: .LBB23_1: @ %vector.ph
+; CHECK-NEXT: .LBB35_1: @ %vector.ph
; CHECK-NEXT: dlstp.16 lr, r3
-; CHECK-NEXT: .LBB23_2: @ %vector.body
+; CHECK-NEXT: .LBB35_2: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrh.u16 q0, [r1], #16
; CHECK-NEXT: vldrh.u16 q1, [r2], #16
; CHECK-NEXT: vmulh.u16 q0, q1, q0
; CHECK-NEXT: vstrh.16 q0, [r0], #16
-; CHECK-NEXT: letp lr, .LBB23_2
+; CHECK-NEXT: letp lr, .LBB35_2
; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -667,15 +1100,15 @@ define void @vmulh_s8_pred(ptr noalias nocapture %d, ptr noalias nocapture reado
; CHECK-NEXT: cmp r3, #1
; CHECK-NEXT: it lt
; CHECK-NEXT: poplt {r7, pc}
-; CHECK-NEXT: .LBB24_1: @ %vector.ph
+; CHECK-NEXT: .LBB36_1: @ %vector.ph
; CHECK-NEXT: dlstp.8 lr, r3
-; CHECK-NEXT: .LBB24_2: @ %vector.body
+; CHECK-NEXT: .LBB36_2: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrb.u8 q0, [r1], #16
; CHECK-NEXT: vldrb.u8 q1, [r2], #16
; CHECK-NEXT: vmulh.s8 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r0], #16
-; CHECK-NEXT: letp lr, .LBB24_2
+; CHECK-NEXT: letp lr, .LBB36_2
; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
@@ -717,15 +1150,15 @@ define void @vmulh_u8_pred(ptr noalias nocapture %d, ptr noalias nocapture reado
; CHECK-NEXT: cmp r3, #1
; CHECK-NEXT: it lt
; CHECK-NEXT: poplt {r7, pc}
-; CHECK-NEXT: .LBB25_1: @ %vector.ph
+; CHECK-NEXT: .LBB37_1: @ %vector.ph
; CHECK-NEXT: dlstp.8 lr, r3
-; CHECK-NEXT: .LBB25_2: @ %vector.body
+; CHECK-NEXT: .LBB37_2: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vldrb.u8 q0, [r1], #16
; CHECK-NEXT: vldrb.u8 q1, [r2], #16
; CHECK-NEXT: vmulh.u8 q0, q1, q0
; CHECK-NEXT: vstrb.8 q0, [r0], #16
-; CHECK-NEXT: letp lr, .LBB25_2
+; CHECK-NEXT: letp lr, .LBB37_2
; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup
; CHECK-NEXT: pop {r7, pc}
entry:
More information about the llvm-commits
mailing list