[llvm] 59811f4 - [AArch64] add more tests for extract-bitcast-splat; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 20 05:57:31 PST 2019


Author: Sanjay Patel
Date: 2019-12-20T08:57:23-05:00
New Revision: 59811f454df08924fc35f7e8fb8cb61e8f40e869

URL: https://github.com/llvm/llvm-project/commit/59811f454df08924fc35f7e8fb8cb61e8f40e869
DIFF: https://github.com/llvm/llvm-project/commit/59811f454df08924fc35f7e8fb8cb61e8f40e869.diff

LOG: [AArch64] add more tests for extract-bitcast-splat; NFC

Goes with D71672 - we should be able to handle casting to
a wider type as well as casting to a narrower type.
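
For reference, the pattern under test is extract-subvector + bitcast + splat. The pre-existing test in this file bitcasts from wider elements (<2 x double>) down to <4 x i16>; the new tests cover the opposite direction, casting the extracted subvector up to a wider element type before splatting, e.g. (excerpted from test_vadd_lane2_i16_bitcast_bigger_aligned in the diff below):

  %extract = shufflevector <16 x i8> %v, <16 x i8> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
  %bc = bitcast <8 x i8> %extract to <4 x i16>
  %splat = shufflevector <4 x i16> %bc, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  %r = add <4 x i16> %splat, %a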

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll b/llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll
index 5eec06f552d1..47d82a174853 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-2velem.ll
@@ -1686,8 +1686,8 @@ define <2 x float> @test_vmul_laneq2_f32_bitcast(<2 x float> %a, <2 x double> %v
   ret <2 x float> %mul
 }
 
-define <4 x i16> @test_vmul_laneq5_i16_bitcast(<4 x i16> %a, <2 x double> %v) {
-; CHECK-LABEL: test_vmul_laneq5_i16_bitcast:
+define <4 x i16> @test_vadd_laneq5_i16_bitcast(<4 x i16> %a, <2 x double> %v) {
+; CHECK-LABEL: test_vadd_laneq5_i16_bitcast:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
 ; CHECK-NEXT:    dup v1.4h, v1.h[1]
@@ -1700,6 +1700,48 @@ define <4 x i16> @test_vmul_laneq5_i16_bitcast(<4 x i16> %a, <2 x double> %v) {
   ret <4 x i16> %r
 }
 
+define <4 x i16> @test_vadd_lane2_i16_bitcast_bigger_aligned(<4 x i16> %a, <16 x i8> %v) {
+; CHECK-LABEL: test_vadd_lane2_i16_bitcast_bigger_aligned:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.8b, v1.8b, v0.8b, #2
+; CHECK-NEXT:    dup v1.4h, v1.h[1]
+; CHECK-NEXT:    add v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    ret
+  %extract = shufflevector <16 x i8> %v, <16 x i8> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
+  %bc = bitcast <8 x i8> %extract to <4 x i16>
+  %splat = shufflevector <4 x i16> %bc, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %r = add <4 x i16> %splat, %a
+  ret <4 x i16> %r
+}
+
+define <4 x i16> @test_vadd_lane5_i16_bitcast_bigger_aligned(<4 x i16> %a, <16 x i8> %v) {
+; CHECK-LABEL: test_vadd_lane5_i16_bitcast_bigger_aligned:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.16b, v1.16b, v1.16b, #8
+; CHECK-NEXT:    dup v1.4h, v1.h[1]
+; CHECK-NEXT:    add v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    ret
+  %extract = shufflevector <16 x i8> %v, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %bc = bitcast <8 x i8> %extract to <4 x i16>
+  %splat = shufflevector <4 x i16> %bc, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %r = add <4 x i16> %splat, %a
+  ret <4 x i16> %r
+}
+
+define <4 x i16> @test_vadd_lane_i16_bitcast_bigger_unaligned(<4 x i16> %a, <16 x i8> %v) {
+; CHECK-LABEL: test_vadd_lane_i16_bitcast_bigger_unaligned:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v1.8b, v1.8b, v0.8b, #1
+; CHECK-NEXT:    dup v1.4h, v1.h[1]
+; CHECK-NEXT:    add v0.4h, v1.4h, v0.4h
+; CHECK-NEXT:    ret
+  %extract = shufflevector <16 x i8> %v, <16 x i8> undef, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+  %bc = bitcast <8 x i8> %extract to <4 x i16>
+  %splat = shufflevector <4 x i16> %bc, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %r = add <4 x i16> %splat, %a
+  ret <4 x i16> %r
+}
+
 define <1 x double> @test_vmul_laneq_f64(<1 x double> %a, <2 x double> %v) {
 ; CHECK-LABEL: test_vmul_laneq_f64:
 ; CHECK:       // %bb.0: // %entry
