[llvm] 4efcea9 - [ARM][AArch64] Some additional tests for bitcast splats. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 8 15:08:11 PST 2022


Author: David Green
Date: 2022-12-08T23:08:06Z
New Revision: 4efcea95852abe6ed25ae9a2bf8c3a51a1157675

URL: https://github.com/llvm/llvm-project/commit/4efcea95852abe6ed25ae9a2bf8c3a51a1157675
DIFF: https://github.com/llvm/llvm-project/commit/4efcea95852abe6ed25ae9a2bf8c3a51a1157675.diff

LOG: [ARM][AArch64] Some additional tests for bitcast splats. NFC
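
For reference, the pattern of interest is a shufflevector splat whose source
vector comes from a bitcast of a scalar. A minimal sketch of the kind of IR
covered (the function name @splat_of_bitcast is only illustrative; it mirrors
the new bitcast_i64_v8i16 test below):

  define <8 x i16> @splat_of_bitcast(i64 %a) {
    ; Reinterpret the 64-bit scalar as a short vector, then broadcast lane 0
    ; into all eight result lanes.
    %b = bitcast i64 %a to <4 x i16>
    %r = shufflevector <4 x i16> %b, <4 x i16> poison, <8 x i32> zeroinitializer
    ret <8 x i16> %r
  }

On AArch64 the checked lowering moves the scalar into a d register with fmov
and then duplicates the lane (dup.8h v0, v0[0]); the MVE runs additionally
cover big endian, where extra vrev instructions are expected around the vdup.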

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/arm64-dup.ll
    llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
    llvm/test/CodeGen/Thumb2/mve-vdup.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/arm64-dup.ll b/llvm/test/CodeGen/AArch64/arm64-dup.ll
index 241ff1254a11..2c3af5be816c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-dup.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-dup.ll
@@ -504,3 +504,89 @@ define <4 x i32> @dup_const24(<2 x i32> %A, <2 x i32> %B, <4 x i32> %C) nounwind
   %tmp5 = xor <4 x i32> %tmp3, %tmp4
   ret <4 x i32> %tmp5
 }
+
+define <8 x i16> @bitcast_i64_v8i16(i64 %a) {
+; CHECK-LABEL: bitcast_i64_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    dup.8h v0, v0[0]
+; CHECK-NEXT:    ret
+  %b = bitcast i64 %a to <4 x i16>
+  %r = shufflevector <4 x i16> %b, <4 x i16> poison, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+
+define <8 x i16> @bitcast_i64_v8i16_lane1(i64 %a) {
+; CHECK-LABEL: bitcast_i64_v8i16_lane1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    dup.8h v0, v0[1]
+; CHECK-NEXT:    ret
+  %b = bitcast i64 %a to <4 x i16>
+  %r = shufflevector <4 x i16> %b, <4 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  ret <8 x i16> %r
+}
+
+define <8 x i16> @bitcast_f64_v8i16(double %a) {
+; CHECK-LABEL: bitcast_f64_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup.8h v0, v0[0]
+; CHECK-NEXT:    ret
+  %b = bitcast double %a to <4 x i16>
+  %r = shufflevector <4 x i16> %b, <4 x i16> poison, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+
+define <8 x half> @bitcast_i64_v8f16(i64 %a) {
+; CHECK-LABEL: bitcast_i64_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    dup.8h v0, v0[0]
+; CHECK-NEXT:    ret
+  %b = bitcast i64 %a to <4 x half>
+  %r = shufflevector <4 x half> %b, <4 x half> poison, <8 x i32> zeroinitializer
+  ret <8 x half> %r
+}
+
+define <2 x i64> @bitcast_i64_v2f64(i64 %a) {
+; CHECK-LABEL: bitcast_i64_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov d0, x0
+; CHECK-NEXT:    dup.2d v0, v0[0]
+; CHECK-NEXT:    ret
+  %b = bitcast i64 %a to <1 x i64>
+  %r = shufflevector <1 x i64> %b, <1 x i64> poison, <2 x i32> zeroinitializer
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @bitcast_v2f64_v2i64(<2 x double> %a) {
+; CHECK-LABEL: bitcast_v2f64_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup.2d v0, v0[0]
+; CHECK-NEXT:    ret
+  %b = bitcast <2 x double> %a to <2 x i64>
+  %r = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
+  ret <2 x i64> %r
+}
+
+define <2 x i64> @bitcast_v8i16_v2i64(<8 x i16> %a) {
+; CHECK-LABEL: bitcast_v8i16_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup.2d v0, v0[0]
+; CHECK-NEXT:    ret
+  %b = bitcast <8 x i16> %a to <2 x i64>
+  %r = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
+  ret <2 x i64> %r
+}
+
+define <8 x i16> @bitcast_v2f64_v8i16(<2 x i64> %a) {
+; CHECK-LABEL: bitcast_v2f64_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    dup.8h v0, v0[0]
+; CHECK-NEXT:    ret
+  %b = bitcast <2 x i64> %a to <8 x i16>
+  %r = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+

diff  --git a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
index c28732c3d598..0994036e105c 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-3vdiff.ll
@@ -2534,4 +2534,49 @@ entry:
   ret i128 %vmull3.i.i
 }
 
-
+define <8 x i16> @cmplx_mul_combined_re_im(<8 x i16> noundef %a, i64 %scale.coerce) {
+; CHECK-LABEL: cmplx_mul_combined_re_im:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    lsr x8, x0, #16
+; CHECK-NEXT:    fmov d4, x0
+; CHECK-NEXT:    rev32 v5.8h, v0.8h
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    adrp x8, .LCPI196_0
+; CHECK-NEXT:    dup v1.8h, v1.h[0]
+; CHECK-NEXT:    ldr q3, [x8, :lo12:.LCPI196_0]
+; CHECK-NEXT:    sqneg v2.8h, v1.8h
+; CHECK-NEXT:    tbl v1.16b, { v1.16b, v2.16b }, v3.16b
+; CHECK-NEXT:    sqdmull v2.4s, v0.4h, v4.h[0]
+; CHECK-NEXT:    sqdmull2 v0.4s, v0.8h, v4.h[0]
+; CHECK-NEXT:    sqdmlal v2.4s, v5.4h, v1.4h
+; CHECK-NEXT:    sqdmlal2 v0.4s, v5.8h, v1.8h
+; CHECK-NEXT:    uzp2 v0.8h, v2.8h, v0.8h
+; CHECK-NEXT:    ret
+entry:
+  %scale.sroa.2.0.extract.shift23 = lshr i64 %scale.coerce, 16
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  %vec.scale.coerce = bitcast i64 %scale.coerce to <4 x i16>
+  %vec.scale.sroa.2.0.extract.shift23 = bitcast i64 %scale.sroa.2.0.extract.shift23 to <4 x i16>
+  %vecinit7.i25 = shufflevector <4 x i16> %vec.scale.sroa.2.0.extract.shift23, <4 x i16> poison, <8 x i32> zeroinitializer
+  %vqnegq_v1.i = tail call <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16> %vecinit7.i25)
+  %0 = shufflevector <8 x i16> %vqnegq_v1.i, <8 x i16> %vecinit7.i25, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+  %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %shuffle.i3.i = shufflevector <4 x i16> %vec.scale.coerce, <4 x i16> poison, <4 x i32> zeroinitializer
+  %vqdmull_v2.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> %shuffle.i3.i)
+  %shuffle.i.i26 = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vqdmull_v2.i.i28 = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i26, <4 x i16> %shuffle.i3.i)
+  %shuffle.i.i29 = shufflevector <8 x i16> %shuffle.i, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %shuffle.i3.i30 = shufflevector <8 x i16> %0, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %vqdmlal2.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i29, <4 x i16> %shuffle.i3.i30)
+  %vqdmlal_v3.i.i = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %vqdmull_v2.i.i, <4 x i32> %vqdmlal2.i.i)
+  %shuffle.i.i31 = shufflevector <8 x i16> %shuffle.i, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %shuffle.i3.i32 = shufflevector <8 x i16> %0, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vqdmlal2.i.i33 = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i31, <4 x i16> %shuffle.i3.i32)
+  %vqdmlal_v3.i.i34 = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %vqdmull_v2.i.i28, <4 x i32> %vqdmlal2.i.i33)
+  %1 = bitcast <4 x i32> %vqdmlal_v3.i.i to <8 x i16>
+  %2 = bitcast <4 x i32> %vqdmlal_v3.i.i34 to <8 x i16>
+  %shuffle.i35 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  ret <8 x i16> %shuffle.i35
+}
+
+declare <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16>)

diff  --git a/llvm/test/CodeGen/Thumb2/mve-vdup.ll b/llvm/test/CodeGen/Thumb2/mve-vdup.ll
index f444ec4ef1e9..74944b3ab76a 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vdup.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vdup.ll
@@ -1,12 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LE
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-LE
+; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-BE
 
 define arm_aapcs_vfpcc <4 x i32> @vdup_i32(i32 %src) {
-; CHECK-LABEL: vdup_i32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vdup.32 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_i32:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vdup.32 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_i32:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vdup.32 q1, r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = insertelement <4 x i32> undef, i32 %src, i32 0
   %out = shufflevector <4 x i32> %0, <4 x i32> undef, <4 x i32> zeroinitializer
@@ -14,10 +21,16 @@ entry:
 }
 
 define arm_aapcs_vfpcc <8 x i16> @vdup_i16(i16 %src) {
-; CHECK-LABEL: vdup_i16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vdup.16 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_i16:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_i16:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = insertelement <8 x i16> undef, i16 %src, i32 0
   %out = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer
@@ -25,10 +38,16 @@ entry:
 }
 
 define arm_aapcs_vfpcc <16 x i8> @vdup_i8(i8 %src) {
-; CHECK-LABEL: vdup_i8:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vdup.8 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_i8:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vdup.8 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_i8:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vdup.8 q1, r0
+; CHECK-BE-NEXT:    vrev64.8 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = insertelement <16 x i8> undef, i8 %src, i32 0
   %out = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -36,11 +55,18 @@ entry:
 }
 
 define arm_aapcs_vfpcc <2 x i64> @vdup_i64(i64 %src) {
-; CHECK-LABEL: vdup_i64:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov q0[2], q0[0], r0, r0
-; CHECK-NEXT:    vmov q0[3], q0[1], r1, r1
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_i64:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vmov q0[2], q0[0], r0, r0
+; CHECK-LE-NEXT:    vmov q0[3], q0[1], r1, r1
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_i64:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vmov q1[2], q1[0], r0, r0
+; CHECK-BE-NEXT:    vmov q1[3], q1[1], r1, r1
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = insertelement <2 x i64> undef, i64 %src, i32 0
   %out = shufflevector <2 x i64> %0, <2 x i64> undef, <2 x i32> zeroinitializer
@@ -48,11 +74,18 @@ entry:
 }
 
 define arm_aapcs_vfpcc <4 x float> @vdup_f32_1(float %src) {
-; CHECK-LABEL: vdup_f32_1:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    vdup.32 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_f32_1:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vmov r0, s0
+; CHECK-LE-NEXT:    vdup.32 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_f32_1:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vmov r0, s0
+; CHECK-BE-NEXT:    vdup.32 q1, r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = insertelement <4 x float> undef, float %src, i32 0
   %out = shufflevector <4 x float> %0, <4 x float> undef, <4 x i32> zeroinitializer
@@ -60,12 +93,20 @@ entry:
 }
 
 define arm_aapcs_vfpcc <4 x float> @vdup_f32_2(float %src1, float %src2) {
-; CHECK-LABEL: vdup_f32_2:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vadd.f32 s0, s0, s1
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    vdup.32 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_f32_2:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vadd.f32 s0, s0, s1
+; CHECK-LE-NEXT:    vmov r0, s0
+; CHECK-LE-NEXT:    vdup.32 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_f32_2:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vadd.f32 s0, s0, s1
+; CHECK-BE-NEXT:    vmov r0, s0
+; CHECK-BE-NEXT:    vdup.32 q1, r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = fadd float %src1, %src2
   %1 = insertelement <4 x float> undef, float %0, i32 0
@@ -74,11 +115,18 @@ entry:
 }
 
 define arm_aapcs_vfpcc <4 x float> @vdup_f32_1bc(float %src) {
-; CHECK-LABEL: vdup_f32_1bc:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    vdup.32 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_f32_1bc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vmov r0, s0
+; CHECK-LE-NEXT:    vdup.32 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_f32_1bc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vmov r0, s0
+; CHECK-BE-NEXT:    vdup.32 q1, r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %srcbc = bitcast float %src to i32
   %0 = insertelement <4 x i32> undef, i32 %srcbc, i32 0
@@ -88,12 +136,20 @@ entry:
 }
 
 define arm_aapcs_vfpcc <4 x float> @vdup_f32_2bc(float %src1, float %src2) {
-; CHECK-LABEL: vdup_f32_2bc:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vadd.f32 s0, s0, s1
-; CHECK-NEXT:    vmov r0, s0
-; CHECK-NEXT:    vdup.32 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_f32_2bc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vadd.f32 s0, s0, s1
+; CHECK-LE-NEXT:    vmov r0, s0
+; CHECK-LE-NEXT:    vdup.32 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_f32_2bc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vadd.f32 s0, s0, s1
+; CHECK-BE-NEXT:    vmov r0, s0
+; CHECK-BE-NEXT:    vdup.32 q1, r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %0 = fadd float %src1, %src2
   %bc = bitcast float %0 to i32
@@ -104,12 +160,20 @@ entry:
 }
 
 define arm_aapcs_vfpcc <8 x half> @vdup_f16(half %0, half %1) {
-; CHECK-LABEL: vdup_f16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vadd.f16 s0, s0, s1
-; CHECK-NEXT:    vmov.f16 r0, s0
-; CHECK-NEXT:    vdup.16 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_f16:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vadd.f16 s0, s0, s1
+; CHECK-LE-NEXT:    vmov.f16 r0, s0
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_f16:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vadd.f16 s0, s0, s1
+; CHECK-BE-NEXT:    vmov.f16 r0, s0
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %2 = fadd half %0, %1
   %3 = insertelement <8 x half> undef, half %2, i32 0
@@ -118,12 +182,20 @@ entry:
 }
 
 define arm_aapcs_vfpcc <8 x half> @vdup_f16_bc(half %0, half %1) {
-; CHECK-LABEL: vdup_f16_bc:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vadd.f16 s0, s0, s1
-; CHECK-NEXT:    vmov.f16 r0, s0
-; CHECK-NEXT:    vdup.16 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vdup_f16_bc:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vadd.f16 s0, s0, s1
+; CHECK-LE-NEXT:    vmov.f16 r0, s0
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vdup_f16_bc:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vadd.f16 s0, s0, s1
+; CHECK-BE-NEXT:    vmov.f16 r0, s0
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %2 = fadd half %0, %1
   %bc = bitcast half %2 to i16
@@ -148,33 +220,57 @@ entry:
 
 
 define arm_aapcs_vfpcc <4 x i32> @vduplane_i32(<4 x i32> %src) {
-; CHECK-LABEL: vduplane_i32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov r0, s3
-; CHECK-NEXT:    vdup.32 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vduplane_i32:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vmov r0, s3
+; CHECK-LE-NEXT:    vdup.32 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vduplane_i32:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vmov r0, s7
+; CHECK-BE-NEXT:    vdup.32 q1, r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x i32> %src, <4 x i32> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
   ret <4 x i32> %out
 }
 
 define arm_aapcs_vfpcc <8 x i16> @vduplane_i16(<8 x i16> %src) {
-; CHECK-LABEL: vduplane_i16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.u16 r0, q0[3]
-; CHECK-NEXT:    vdup.16 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vduplane_i16:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vduplane_i16:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x i16> %src, <8 x i16> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
   ret <8 x i16> %out
 }
 
 define arm_aapcs_vfpcc <16 x i8> @vduplane_i8(<16 x i8> %src) {
-; CHECK-LABEL: vduplane_i8:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.u8 r0, q0[3]
-; CHECK-NEXT:    vdup.8 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vduplane_i8:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vmov.u8 r0, q0[3]
+; CHECK-LE-NEXT:    vdup.8 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vduplane_i8:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vrev64.8 q1, q0
+; CHECK-BE-NEXT:    vmov.u8 r0, q1[3]
+; CHECK-BE-NEXT:    vdup.8 q1, r0
+; CHECK-BE-NEXT:    vrev64.8 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %out = shufflevector <16 x i8> %src, <16 x i8> undef, <16 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
   ret <16 x i8> %out
@@ -192,22 +288,38 @@ entry:
 }
 
 define arm_aapcs_vfpcc <4 x float> @vduplane_f32(<4 x float> %src) {
-; CHECK-LABEL: vduplane_f32:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov r0, s3
-; CHECK-NEXT:    vdup.32 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vduplane_f32:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vmov r0, s3
+; CHECK-LE-NEXT:    vdup.32 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vduplane_f32:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vrev64.32 q1, q0
+; CHECK-BE-NEXT:    vmov r0, s7
+; CHECK-BE-NEXT:    vdup.32 q1, r0
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %out = shufflevector <4 x float> %src, <4 x float> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
   ret <4 x float> %out
 }
 
 define arm_aapcs_vfpcc <8 x half> @vduplane_f16(<8 x half> %src) {
-; CHECK-LABEL: vduplane_f16:
-; CHECK:       @ %bb.0: @ %entry
-; CHECK-NEXT:    vmov.u16 r0, q0[3]
-; CHECK-NEXT:    vdup.16 q0, r0
-; CHECK-NEXT:    bx lr
+; CHECK-LE-LABEL: vduplane_f16:
+; CHECK-LE:       @ %bb.0: @ %entry
+; CHECK-LE-NEXT:    vmov.u16 r0, q0[3]
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: vduplane_f16:
+; CHECK-BE:       @ %bb.0: @ %entry
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vmov.u16 r0, q1[3]
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
 entry:
   %out = shufflevector <8 x half> %src, <8 x half> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
   ret <8 x half> %out
@@ -252,3 +364,205 @@ entry:
   %ext = extractelement <8 x half> %outbc, i32 2
   ret half %ext
 }
+
+
+define arm_aapcs_vfpcc <8 x i16> @bitcast_i64_v8i16(i64 %a) {
+; CHECK-LE-LABEL: bitcast_i64_v8i16:
+; CHECK-LE:       @ %bb.0:
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    strd r0, r1, [sp]
+; CHECK-LE-NEXT:    mov r0, sp
+; CHECK-LE-NEXT:    vldrh.u32 q0, [r0]
+; CHECK-LE-NEXT:    vmov r0, s0
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: bitcast_i64_v8i16:
+; CHECK-BE:       @ %bb.0:
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    strd r0, r1, [sp]
+; CHECK-BE-NEXT:    mov r0, sp
+; CHECK-BE-NEXT:    vldrh.u32 q0, [r0]
+; CHECK-BE-NEXT:    vmov r0, s0
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+  %b = bitcast i64 %a to <4 x i16>
+  %r = shufflevector <4 x i16> %b, <4 x i16> poison, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+
+define arm_aapcs_vfpcc <8 x i16> @bitcast_i128_v8i16(i128 %a) {
+; CHECK-LE-LABEL: bitcast_i128_v8i16:
+; CHECK-LE:       @ %bb.0:
+; CHECK-LE-NEXT:    vmov.32 q0[0], r0
+; CHECK-LE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: bitcast_i128_v8i16:
+; CHECK-BE:       @ %bb.0:
+; CHECK-BE-NEXT:    vmov.32 q0[0], r0
+; CHECK-BE-NEXT:    vrev32.16 q0, q0
+; CHECK-BE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
+  %b = bitcast i128 %a to <8 x i16>
+  %r = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+
+define arm_aapcs_vfpcc <8 x i16> @bitcast_i64_v8i16_lane1(i64 %a) {
+; CHECK-LE-LABEL: bitcast_i64_v8i16_lane1:
+; CHECK-LE:       @ %bb.0:
+; CHECK-LE-NEXT:    .pad #8
+; CHECK-LE-NEXT:    sub sp, #8
+; CHECK-LE-NEXT:    strd r0, r1, [sp]
+; CHECK-LE-NEXT:    mov r0, sp
+; CHECK-LE-NEXT:    vldrh.u32 q0, [r0]
+; CHECK-LE-NEXT:    vmov r0, s1
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    add sp, #8
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: bitcast_i64_v8i16_lane1:
+; CHECK-BE:       @ %bb.0:
+; CHECK-BE-NEXT:    .pad #8
+; CHECK-BE-NEXT:    sub sp, #8
+; CHECK-BE-NEXT:    strd r0, r1, [sp]
+; CHECK-BE-NEXT:    mov r0, sp
+; CHECK-BE-NEXT:    vldrh.u32 q0, [r0]
+; CHECK-BE-NEXT:    vmov r0, s1
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    add sp, #8
+; CHECK-BE-NEXT:    bx lr
+  %b = bitcast i64 %a to <4 x i16>
+  %r = shufflevector <4 x i16> %b, <4 x i16> poison, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  ret <8 x i16> %r
+}
+
+define arm_aapcs_vfpcc <8 x i16> @bitcast_f64_v8i16(double %a) {
+; CHECK-LE-LABEL: bitcast_f64_v8i16:
+; CHECK-LE:       @ %bb.0:
+; CHECK-LE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: bitcast_f64_v8i16:
+; CHECK-BE:       @ %bb.0:
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vmov.u16 r0, q1[0]
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
+  %b = bitcast double %a to <4 x i16>
+  %r = shufflevector <4 x i16> %b, <4 x i16> poison, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+
+define arm_aapcs_vfpcc <8 x half> @bitcast_i64_v8f16(i64 %a) {
+; CHECK-LE-LABEL: bitcast_i64_v8f16:
+; CHECK-LE:       @ %bb.0:
+; CHECK-LE-NEXT:    vmov.32 q0[0], r0
+; CHECK-LE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: bitcast_i64_v8f16:
+; CHECK-BE:       @ %bb.0:
+; CHECK-BE-NEXT:    vmov.32 q0[0], r0
+; CHECK-BE-NEXT:    vrev32.16 q0, q0
+; CHECK-BE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
+  %b = bitcast i64 %a to <4 x half>
+  %r = shufflevector <4 x half> %b, <4 x half> poison, <8 x i32> zeroinitializer
+  ret <8 x half> %r
+}
+
+define arm_aapcs_vfpcc <2 x i64> @bitcast_i64_v2f64(i64 %a) {
+; CHECK-LE-LABEL: bitcast_i64_v2f64:
+; CHECK-LE:       @ %bb.0:
+; CHECK-LE-NEXT:    vmov q0[2], q0[0], r0, r0
+; CHECK-LE-NEXT:    vmov q0[3], q0[1], r1, r1
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: bitcast_i64_v2f64:
+; CHECK-BE:       @ %bb.0:
+; CHECK-BE-NEXT:    vmov q1[2], q1[0], r0, r0
+; CHECK-BE-NEXT:    vmov q1[3], q1[1], r1, r1
+; CHECK-BE-NEXT:    vrev64.32 q0, q1
+; CHECK-BE-NEXT:    bx lr
+  %b = bitcast i64 %a to <1 x i64>
+  %r = shufflevector <1 x i64> %b, <1 x i64> poison, <2 x i32> zeroinitializer
+  ret <2 x i64> %r
+}
+
+define arm_aapcs_vfpcc <2 x i64> @bitcast_v2f64_v2i64(<2 x double> %a) {
+; CHECK-LABEL: bitcast_v2f64_v2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f32 s2, s0
+; CHECK-NEXT:    vmov.f32 s3, s1
+; CHECK-NEXT:    bx lr
+  %b = bitcast <2 x double> %a to <2 x i64>
+  %r = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
+  ret <2 x i64> %r
+}
+
+define arm_aapcs_vfpcc <2 x i64> @bitcast_v8i16_v2i64(<8 x i16> %a) {
+; CHECK-LABEL: bitcast_v8i16_v2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmov.f32 s2, s0
+; CHECK-NEXT:    vmov.f32 s3, s1
+; CHECK-NEXT:    bx lr
+  %b = bitcast <8 x i16> %a to <2 x i64>
+  %r = shufflevector <2 x i64> %b, <2 x i64> poison, <2 x i32> zeroinitializer
+  ret <2 x i64> %r
+}
+
+define arm_aapcs_vfpcc <8 x i16> @bitcast_v2f64_v8i16(<2 x i64> %a) {
+; CHECK-LE-LABEL: bitcast_v2f64_v8i16:
+; CHECK-LE:       @ %bb.0:
+; CHECK-LE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: bitcast_v2f64_v8i16:
+; CHECK-BE:       @ %bb.0:
+; CHECK-BE-NEXT:    vrev64.16 q1, q0
+; CHECK-BE-NEXT:    vmov.u16 r0, q1[0]
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
+  %b = bitcast <2 x i64> %a to <8 x i16>
+  %r = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
+  ret <8 x i16> %r
+}
+
+define arm_aapcs_vfpcc <8 x i16> @other_max_case(i32 %blockSize) {
+; CHECK-LE-LABEL: other_max_case:
+; CHECK-LE:       @ %bb.0:
+; CHECK-LE-NEXT:    vmov.32 q0[0], r0
+; CHECK-LE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-LE-NEXT:    vdup.16 q0, r0
+; CHECK-LE-NEXT:    bx lr
+;
+; CHECK-BE-LABEL: other_max_case:
+; CHECK-BE:       @ %bb.0:
+; CHECK-BE-NEXT:    vmov.32 q0[0], r0
+; CHECK-BE-NEXT:    vrev32.16 q0, q0
+; CHECK-BE-NEXT:    vmov.u16 r0, q0[0]
+; CHECK-BE-NEXT:    vdup.16 q1, r0
+; CHECK-BE-NEXT:    vrev64.16 q0, q1
+; CHECK-BE-NEXT:    bx lr
+  %vec.blockSize = bitcast i32 %blockSize to <2 x i16>
+  %.splat2 = shufflevector <2 x i16> %vec.blockSize, <2 x i16> poison, <8 x i32> zeroinitializer
+  ret <8 x i16> %.splat2
+}


        

