[clang] bcc5ed7 - [CodeGen] fix test to be (mostly) independent of LLVM optimizer; NFC

Sanjay Patel via cfe-commits cfe-commits at lists.llvm.org
Sun May 10 08:22:42 PDT 2020


Author: Sanjay Patel
Date: 2020-05-10T11:19:43-04:00
New Revision: bcc5ed7b24e921c8902d0d0db614576bd249f128

URL: https://github.com/llvm/llvm-project/commit/bcc5ed7b24e921c8902d0d0db614576bd249f128
DIFF: https://github.com/llvm/llvm-project/commit/bcc5ed7b24e921c8902d0d0db614576bd249f128.diff

LOG: [CodeGen] fix test to be (mostly) independent of LLVM optimizer; NFC

This test would break with the proposed change to IR canonicalization
in D79171. The raw unoptimized IR from clang is massive, so I've
replaced -instcombine with -mem2reg to make the output more manageable
while keeping it unlikely to break with unrelated changes to optimization.
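For context, a minimal sketch of the before/after pipelines (assuming a
built clang and opt on PATH; the flags mirror the test's RUN line, with
the remaining cc1 flags elided as "..."):

  # Before: instcombine fully canonicalizes the IR, so the CHECK lines
  # track whatever InstCombine currently produces and shift whenever its
  # canonicalization rules change.
  clang -cc1 -triple arm64-none-linux-gnu ... -emit-llvm -o - \
      aarch64-neon-fp16fml.c | opt -S -instcombine

  # After: mem2reg only promotes allocas to SSA values, trimming the raw
  # unoptimized IR without depending on canonicalization rules.
  clang -cc1 -triple arm64-none-linux-gnu ... -emit-llvm -o - \
      aarch64-neon-fp16fml.c | opt -S -mem2reg

The assertions below were then regenerated with the script named in the
test's NOTE line; a hypothetical invocation (see the script's --help for
the exact options in your checkout) would look like:

  llvm/utils/update_cc_test_checks.py --clang=<build>/bin/clang \
      clang/test/CodeGen/aarch64-neon-fp16fml.c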

Added: 
    

Modified: 
    clang/test/CodeGen/aarch64-neon-fp16fml.c

Removed: 
    


################################################################################
diff --git a/clang/test/CodeGen/aarch64-neon-fp16fml.c b/clang/test/CodeGen/aarch64-neon-fp16fml.c
index 3436d8b212ef..3a96692edc88 100644
--- a/clang/test/CodeGen/aarch64-neon-fp16fml.c
+++ b/clang/test/CodeGen/aarch64-neon-fp16fml.c
@@ -1,5 +1,6 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +v8.2a -target-feature +neon -target-feature +fp16fml \
-// RUN: -fallow-half-arguments-and-returns -disable-O0-optnone -emit-llvm -o - %s | opt -S -instcombine | FileCheck %s
+// RUN: -fallow-half-arguments-and-returns -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
 
 // REQUIRES: aarch64-registered-target
 
@@ -9,188 +10,1252 @@
 
 // Vector form
 
+// CHECK-LABEL: @test_vfmlal_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x half> [[C:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLAL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[C]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLAL_LOW3_I]]
+//
 float32x2_t test_vfmlal_low_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlal_low_f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlal_low_f16(a, b, c);
 }
 
+// CHECK-LABEL: @test_vfmlsl_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x half> [[C:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLSL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[C]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLSL_LOW3_I]]
+//
 float32x2_t test_vfmlsl_low_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlsl_low_f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlsl_low_f16(a, b, c);
 }
 
+// CHECK-LABEL: @test_vfmlal_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x half> [[C:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLAL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[C]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLAL_HIGH3_I]]
+//
 float32x2_t test_vfmlal_high_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlal_high_f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal2.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlal_high_f16(a, b, c);
 }
 
+// CHECK-LABEL: @test_vfmlsl_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x half> [[C:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLSL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[C]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLSL_HIGH3_I]]
+//
 float32x2_t test_vfmlsl_high_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlsl_high_f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl2.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlsl_high_f16(a, b, c);
 }
 
+// CHECK-LABEL: @test_vfmlalq_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x half> [[C:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLAL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[C]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLAL_LOW3_I]]
+//
 float32x4_t test_vfmlalq_low_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlalq_low_f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlalq_low_f16(a, b, c);
 }
 
+// CHECK-LABEL: @test_vfmlslq_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x half> [[C:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLSL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[C]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLSL_LOW3_I]]
+//
 float32x4_t test_vfmlslq_low_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlslq_low_f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlslq_low_f16(a, b, c);
 }
 
+// CHECK-LABEL: @test_vfmlalq_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x half> [[C:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLAL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[C]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLAL_HIGH3_I]]
+//
 float32x4_t test_vfmlalq_high_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlalq_high_f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlalq_high_f16(a, b, c);
 }
 
+// CHECK-LABEL: @test_vfmlslq_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x half> [[C:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLSL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[C]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLSL_HIGH3_I]]
+//
 float32x4_t test_vfmlslq_high_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlslq_high_f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlslq_high_f16(a, b, c);
 }
 
 // Indexed form
 
+// CHECK-LABEL: @test_vfmlal_lane_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_716:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_716:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7164:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_7165:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71614:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71615:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71624:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71625:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 0
+// CHECK-NEXT:    store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
+// CHECK-NEXT:    [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 0
+// CHECK-NEXT:    store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
+// CHECK-NEXT:    [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 0
+// CHECK-NEXT:    store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
+// CHECK-NEXT:    [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 0
+// CHECK-NEXT:    store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLAL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLAL_LOW3_I]]
+//
 float32x2_t test_vfmlal_lane_low_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlal_lane_low_f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x half> %c, <4 x half> undef, <4 x i32> zeroinitializer
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> [[SHUFFLE]])
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlal_lane_low_f16(a, b, c, 0);
 }
 
+// CHECK-LABEL: @test_vfmlal_lane_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_716:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_716:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7164:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_7165:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71614:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71615:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71624:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71625:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 1
+// CHECK-NEXT:    store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
+// CHECK-NEXT:    [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 1
+// CHECK-NEXT:    store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
+// CHECK-NEXT:    [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 1
+// CHECK-NEXT:    store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
+// CHECK-NEXT:    [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 1
+// CHECK-NEXT:    store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLAL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLAL_HIGH3_I]]
+//
 float32x2_t test_vfmlal_lane_high_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlal_lane_high_f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x half> %c, <4 x half> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal2.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> [[SHUFFLE]])
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlal_lane_high_f16(a, b, c, 1);
 }
 
+// CHECK-LABEL: @test_vfmlalq_lane_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_716:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_716:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7164:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_7165:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71614:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71615:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71624:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71625:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71634:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71635:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71644:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71645:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71654:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71655:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71664:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71665:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
+// CHECK-NEXT:    [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
+// CHECK-NEXT:    [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
+// CHECK-NEXT:    [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71634]], align 8
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_71634]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8
+// CHECK-NEXT:    [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE38]], i16* [[__REINT1_71635]], align 2
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i16* [[__REINT1_71635]] to half*
+// CHECK-NEXT:    [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
+// CHECK-NEXT:    [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71644]], align 8
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_71644]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8
+// CHECK-NEXT:    [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE48]], i16* [[__REINT1_71645]], align 2
+// CHECK-NEXT:    [[TMP22:%.*]] = bitcast i16* [[__REINT1_71645]] to half*
+// CHECK-NEXT:    [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
+// CHECK-NEXT:    [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71654]], align 8
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_71654]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8
+// CHECK-NEXT:    [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE58]], i16* [[__REINT1_71655]], align 2
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i16* [[__REINT1_71655]] to half*
+// CHECK-NEXT:    [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
+// CHECK-NEXT:    [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71664]], align 8
+// CHECK-NEXT:    [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_71664]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8
+// CHECK-NEXT:    [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE68]], i16* [[__REINT1_71665]], align 2
+// CHECK-NEXT:    [[TMP30:%.*]] = bitcast i16* [[__REINT1_71665]] to half*
+// CHECK-NEXT:    [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
+// CHECK-NEXT:    [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
+// CHECK-NEXT:    [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLAL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLAL_LOW3_I]]
+//
 float32x4_t test_vfmlalq_lane_low_f16(float32x4_t a, float16x8_t b, float16x4_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlalq_lane_low_f16(<4 x float> %a, <8 x half> %b, <4 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x half> %c, <4 x half> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> [[SHUFFLE]])
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlalq_lane_low_f16(a, b, c, 2);
 }
 
+// CHECK-LABEL: @test_vfmlalq_lane_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_716:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_716:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7164:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_7165:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71614:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71615:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71624:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71625:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71634:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71635:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71644:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71645:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71654:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71655:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71664:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71665:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
+// CHECK-NEXT:    [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
+// CHECK-NEXT:    [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
+// CHECK-NEXT:    [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71634]], align 8
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_71634]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8
+// CHECK-NEXT:    [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE38]], i16* [[__REINT1_71635]], align 2
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i16* [[__REINT1_71635]] to half*
+// CHECK-NEXT:    [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
+// CHECK-NEXT:    [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71644]], align 8
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_71644]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8
+// CHECK-NEXT:    [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE48]], i16* [[__REINT1_71645]], align 2
+// CHECK-NEXT:    [[TMP22:%.*]] = bitcast i16* [[__REINT1_71645]] to half*
+// CHECK-NEXT:    [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
+// CHECK-NEXT:    [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71654]], align 8
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_71654]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8
+// CHECK-NEXT:    [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE58]], i16* [[__REINT1_71655]], align 2
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i16* [[__REINT1_71655]] to half*
+// CHECK-NEXT:    [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
+// CHECK-NEXT:    [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71664]], align 8
+// CHECK-NEXT:    [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_71664]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8
+// CHECK-NEXT:    [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE68]], i16* [[__REINT1_71665]], align 2
+// CHECK-NEXT:    [[TMP30:%.*]] = bitcast i16* [[__REINT1_71665]] to half*
+// CHECK-NEXT:    [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
+// CHECK-NEXT:    [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
+// CHECK-NEXT:    [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLAL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLAL_HIGH3_I]]
+//
 float32x4_t test_vfmlalq_lane_high_f16(float32x4_t a, float16x8_t b, float16x4_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlalq_lane_high_f16(<4 x float> %a, <8 x half> %b, <4 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x half> %c, <4 x half> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> [[SHUFFLE]])
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlalq_lane_high_f16(a, b, c, 3);
 }
 
+// CHECK-LABEL: @test_vfmlal_laneq_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_719:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_719:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7194:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_7195:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71914:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71915:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71924:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71925:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 4
+// CHECK-NEXT:    store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 4
+// CHECK-NEXT:    store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 4
+// CHECK-NEXT:    store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 4
+// CHECK-NEXT:    store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLAL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLAL_LOW3_I]]
+//
 float32x2_t test_vfmlal_laneq_low_f16(float32x2_t a, float16x4_t b, float16x8_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlal_laneq_low_f16(<2 x float> %a, <4 x half> %b, <8 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <8 x half> %c, <8 x half> undef, <4 x i32> <i32 4, i32 4, i32 4, i32 4>
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> [[SHUFFLE]])
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlal_laneq_low_f16(a, b, c, 4);
 }
 
+// CHECK-LABEL: @test_vfmlal_laneq_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_719:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_719:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7194:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_7195:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71914:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71915:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71924:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71925:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 5
+// CHECK-NEXT:    store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 5
+// CHECK-NEXT:    store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 5
+// CHECK-NEXT:    store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 5
+// CHECK-NEXT:    store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLAL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLAL_HIGH3_I]]
+//
 float32x2_t test_vfmlal_laneq_high_f16(float32x2_t a, float16x4_t b, float16x8_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlal_laneq_high_f16(<2 x float> %a, <4 x half> %b, <8 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <8 x half> %c, <8 x half> undef, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlal2.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> [[SHUFFLE]])
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlal_laneq_high_f16(a, b, c, 5);
 }
 
+// CHECK-LABEL: @test_vfmlalq_laneq_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_719:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_719:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7194:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_7195:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71914:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71915:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71924:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71925:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71934:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71935:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71944:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71945:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71954:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71955:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71964:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71965:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71934]], align 16
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_71934]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE38]], i16* [[__REINT1_71935]], align 2
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i16* [[__REINT1_71935]] to half*
+// CHECK-NEXT:    [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
+// CHECK-NEXT:    [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71944]], align 16
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_71944]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE48]], i16* [[__REINT1_71945]], align 2
+// CHECK-NEXT:    [[TMP22:%.*]] = bitcast i16* [[__REINT1_71945]] to half*
+// CHECK-NEXT:    [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
+// CHECK-NEXT:    [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71954]], align 16
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_71954]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE58]], i16* [[__REINT1_71955]], align 2
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i16* [[__REINT1_71955]] to half*
+// CHECK-NEXT:    [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
+// CHECK-NEXT:    [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71964]], align 16
+// CHECK-NEXT:    [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_71964]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE68]], i16* [[__REINT1_71965]], align 2
+// CHECK-NEXT:    [[TMP30:%.*]] = bitcast i16* [[__REINT1_71965]] to half*
+// CHECK-NEXT:    [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
+// CHECK-NEXT:    [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
+// CHECK-NEXT:    [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLAL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLAL_LOW3_I]]
+//
 float32x4_t test_vfmlalq_laneq_low_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlalq_laneq_low_f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <8 x half> %c, <8 x half> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> [[SHUFFLE]])
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlalq_laneq_low_f16(a, b, c, 6);
 }
 
+// CHECK-LABEL: @test_vfmlalq_laneq_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_719:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_719:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7194:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_7195:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71914:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71915:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71924:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71925:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71934:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71935:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71944:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71945:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71954:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71955:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71964:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71965:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71934]], align 16
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_71934]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE38]], i16* [[__REINT1_71935]], align 2
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i16* [[__REINT1_71935]] to half*
+// CHECK-NEXT:    [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
+// CHECK-NEXT:    [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71944]], align 16
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_71944]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE48]], i16* [[__REINT1_71945]], align 2
+// CHECK-NEXT:    [[TMP22:%.*]] = bitcast i16* [[__REINT1_71945]] to half*
+// CHECK-NEXT:    [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
+// CHECK-NEXT:    [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71954]], align 16
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_71954]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE58]], i16* [[__REINT1_71955]], align 2
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i16* [[__REINT1_71955]] to half*
+// CHECK-NEXT:    [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
+// CHECK-NEXT:    [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71964]], align 16
+// CHECK-NEXT:    [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_71964]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE68]], i16* [[__REINT1_71965]], align 2
+// CHECK-NEXT:    [[TMP30:%.*]] = bitcast i16* [[__REINT1_71965]] to half*
+// CHECK-NEXT:    [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
+// CHECK-NEXT:    [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
+// CHECK-NEXT:    [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLAL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLAL_HIGH3_I]]
+//
 float32x4_t test_vfmlalq_laneq_high_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlalq_laneq_high_f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <8 x half> %c, <8 x half> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlal2.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> [[SHUFFLE]])
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlalq_laneq_high_f16(a, b, c, 7);
 }
 
+// CHECK-LABEL: @test_vfmlsl_lane_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_716:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_716:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7164:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_7165:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71614:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71615:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71624:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71625:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 0
+// CHECK-NEXT:    store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
+// CHECK-NEXT:    [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 0
+// CHECK-NEXT:    store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
+// CHECK-NEXT:    [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 0
+// CHECK-NEXT:    store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
+// CHECK-NEXT:    [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 0
+// CHECK-NEXT:    store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLSL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLSL_LOW3_I]]
+//
 float32x2_t test_vfmlsl_lane_low_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlsl_lane_low_f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x half> %c, <4 x half> undef, <4 x i32> zeroinitializer
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> [[SHUFFLE]])
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlsl_lane_low_f16(a, b, c, 0);
 }
 
+// CHECK-LABEL: @test_vfmlsl_lane_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_716:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_716:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7164:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_7165:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71614:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71615:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71624:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71625:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 1
+// CHECK-NEXT:    store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
+// CHECK-NEXT:    [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 1
+// CHECK-NEXT:    store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
+// CHECK-NEXT:    [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 1
+// CHECK-NEXT:    store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
+// CHECK-NEXT:    [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 1
+// CHECK-NEXT:    store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLSL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLSL_HIGH3_I]]
+//
 float32x2_t test_vfmlsl_lane_high_f16(float32x2_t a, float16x4_t b, float16x4_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlsl_lane_high_f16(<2 x float> %a, <4 x half> %b, <4 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x half> %c, <4 x half> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl2.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> [[SHUFFLE]])
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlsl_lane_high_f16(a, b, c, 1);
 }
 
+// CHECK-LABEL: @test_vfmlslq_lane_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_716:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_716:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7164:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_7165:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71614:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71615:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71624:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71625:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71634:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71635:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71644:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71645:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71654:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71655:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71664:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71665:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
+// CHECK-NEXT:    [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
+// CHECK-NEXT:    [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
+// CHECK-NEXT:    [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71634]], align 8
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_71634]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8
+// CHECK-NEXT:    [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE38]], i16* [[__REINT1_71635]], align 2
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i16* [[__REINT1_71635]] to half*
+// CHECK-NEXT:    [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
+// CHECK-NEXT:    [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71644]], align 8
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_71644]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8
+// CHECK-NEXT:    [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE48]], i16* [[__REINT1_71645]], align 2
+// CHECK-NEXT:    [[TMP22:%.*]] = bitcast i16* [[__REINT1_71645]] to half*
+// CHECK-NEXT:    [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
+// CHECK-NEXT:    [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71654]], align 8
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_71654]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8
+// CHECK-NEXT:    [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE58]], i16* [[__REINT1_71655]], align 2
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i16* [[__REINT1_71655]] to half*
+// CHECK-NEXT:    [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
+// CHECK-NEXT:    [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71664]], align 8
+// CHECK-NEXT:    [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_71664]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8
+// CHECK-NEXT:    [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 2
+// CHECK-NEXT:    store i16 [[VGET_LANE68]], i16* [[__REINT1_71665]], align 2
+// CHECK-NEXT:    [[TMP30:%.*]] = bitcast i16* [[__REINT1_71665]] to half*
+// CHECK-NEXT:    [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
+// CHECK-NEXT:    [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
+// CHECK-NEXT:    [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLSL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLSL_LOW3_I]]
+//
 float32x4_t test_vfmlslq_lane_low_f16(float32x4_t a, float16x8_t b, float16x4_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlslq_lane_low_f16(<4 x float> %a, <8 x half> %b, <4 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x half> %c, <4 x half> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> [[SHUFFLE]])
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlslq_lane_low_f16(a, b, c, 2);
 }
 
+// CHECK-LABEL: @test_vfmlslq_lane_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_716:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_716:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7164:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_7165:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71614:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71615:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71624:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71625:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71634:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71635:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71644:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71645:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71654:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71655:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71664:%.*]] = alloca <4 x half>, align 8
+// CHECK-NEXT:    [[__REINT1_71665:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <4 x half> [[C:%.*]], <4 x half>* [[__REINT_716]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x half>* [[__REINT_716]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 8
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE]], i16* [[__REINT1_716]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_716]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_7164]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x half>* [[__REINT_7164]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i16>, <4 x i16>* [[TMP4]], align 8
+// CHECK-NEXT:    [[VGET_LANE8:%.*]] = extractelement <4 x i16> [[TMP5]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE8]], i16* [[__REINT1_7165]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7165]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71614]], align 8
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <4 x half>* [[__REINT_71614]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i16>, <4 x i16>* [[TMP8]], align 8
+// CHECK-NEXT:    [[VGET_LANE18:%.*]] = extractelement <4 x i16> [[TMP9]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE18]], i16* [[__REINT1_71615]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71615]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71624]], align 8
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <4 x half>* [[__REINT_71624]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <4 x i16>, <4 x i16>* [[TMP12]], align 8
+// CHECK-NEXT:    [[VGET_LANE28:%.*]] = extractelement <4 x i16> [[TMP13]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE28]], i16* [[__REINT1_71625]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71625]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71634]], align 8
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <4 x half>* [[__REINT_71634]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP17:%.*]] = load <4 x i16>, <4 x i16>* [[TMP16]], align 8
+// CHECK-NEXT:    [[VGET_LANE38:%.*]] = extractelement <4 x i16> [[TMP17]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE38]], i16* [[__REINT1_71635]], align 2
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i16* [[__REINT1_71635]] to half*
+// CHECK-NEXT:    [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
+// CHECK-NEXT:    [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71644]], align 8
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x half>* [[__REINT_71644]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP21:%.*]] = load <4 x i16>, <4 x i16>* [[TMP20]], align 8
+// CHECK-NEXT:    [[VGET_LANE48:%.*]] = extractelement <4 x i16> [[TMP21]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE48]], i16* [[__REINT1_71645]], align 2
+// CHECK-NEXT:    [[TMP22:%.*]] = bitcast i16* [[__REINT1_71645]] to half*
+// CHECK-NEXT:    [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
+// CHECK-NEXT:    [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71654]], align 8
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast <4 x half>* [[__REINT_71654]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP25:%.*]] = load <4 x i16>, <4 x i16>* [[TMP24]], align 8
+// CHECK-NEXT:    [[VGET_LANE58:%.*]] = extractelement <4 x i16> [[TMP25]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE58]], i16* [[__REINT1_71655]], align 2
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i16* [[__REINT1_71655]] to half*
+// CHECK-NEXT:    [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
+// CHECK-NEXT:    [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
+// CHECK-NEXT:    store <4 x half> [[C]], <4 x half>* [[__REINT_71664]], align 8
+// CHECK-NEXT:    [[TMP28:%.*]] = bitcast <4 x half>* [[__REINT_71664]] to <4 x i16>*
+// CHECK-NEXT:    [[TMP29:%.*]] = load <4 x i16>, <4 x i16>* [[TMP28]], align 8
+// CHECK-NEXT:    [[VGET_LANE68:%.*]] = extractelement <4 x i16> [[TMP29]], i32 3
+// CHECK-NEXT:    store i16 [[VGET_LANE68]], i16* [[__REINT1_71665]], align 2
+// CHECK-NEXT:    [[TMP30:%.*]] = bitcast i16* [[__REINT1_71665]] to half*
+// CHECK-NEXT:    [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
+// CHECK-NEXT:    [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
+// CHECK-NEXT:    [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLSL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLSL_HIGH3_I]]
+//
 float32x4_t test_vfmlslq_lane_high_f16(float32x4_t a, float16x8_t b, float16x4_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlslq_lane_high_f16(<4 x float> %a, <8 x half> %b, <4 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <4 x half> %c, <4 x half> undef, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> [[SHUFFLE]])
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlslq_lane_high_f16(a, b, c, 3);
 }
 
+// CHECK-LABEL: @test_vfmlsl_laneq_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_719:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_719:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7194:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_7195:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71914:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71915:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71924:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71925:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 4
+// CHECK-NEXT:    store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 4
+// CHECK-NEXT:    store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 4
+// CHECK-NEXT:    store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 4
+// CHECK-NEXT:    store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLSL_LOW3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLSL_LOW3_I]]
+//
 float32x2_t test_vfmlsl_laneq_low_f16(float32x2_t a, float16x4_t b, float16x8_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlsl_laneq_low_f16(<2 x float> %a, <4 x half> %b, <8 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <8 x half> %c, <8 x half> undef, <4 x i32> <i32 4, i32 4, i32 4, i32 4>
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> [[SHUFFLE]])
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlsl_laneq_low_f16(a, b, c, 4);
 }
 
+// CHECK-LABEL: @test_vfmlsl_laneq_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_719:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_719:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7194:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_7195:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71914:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71915:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71924:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71925:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 5
+// CHECK-NEXT:    store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 5
+// CHECK-NEXT:    store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 5
+// CHECK-NEXT:    store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <4 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 5
+// CHECK-NEXT:    store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <4 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <2 x float> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP17:%.*]] = bitcast <4 x half> [[B:%.*]] to <8 x i8>
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast <4 x half> [[VECINIT31]] to <8 x i8>
+// CHECK-NEXT:    [[VFMLSL_HIGH3_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl2.v2f32.v4f16(<2 x float> [[A]], <4 x half> [[B]], <4 x half> [[VECINIT31]]) #3
+// CHECK-NEXT:    ret <2 x float> [[VFMLSL_HIGH3_I]]
+//
 float32x2_t test_vfmlsl_laneq_high_f16(float32x2_t a, float16x4_t b, float16x8_t c) {
-// CHECK-LABEL: define <2 x float> @test_vfmlsl_laneq_high_f16(<2 x float> %a, <4 x half> %b, <8 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <8 x half> %c, <8 x half> undef, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
-// CHECK: [[RESULT:%.*]] = call <2 x float> @llvm.aarch64.neon.fmlsl2.v2f32.v4f16(<2 x float> %a, <4 x half> %b, <4 x half> [[SHUFFLE]])
-// CHECK: ret <2 x float> [[RESULT]]
   return vfmlsl_laneq_high_f16(a, b, c, 5);
 }
 
+// CHECK-LABEL: @test_vfmlslq_laneq_low_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_719:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_719:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7194:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_7195:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71914:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71915:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71924:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71925:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71934:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71935:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71944:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71945:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71954:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71955:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71964:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71965:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71934]], align 16
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_71934]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE38]], i16* [[__REINT1_71935]], align 2
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i16* [[__REINT1_71935]] to half*
+// CHECK-NEXT:    [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
+// CHECK-NEXT:    [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71944]], align 16
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_71944]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE48]], i16* [[__REINT1_71945]], align 2
+// CHECK-NEXT:    [[TMP22:%.*]] = bitcast i16* [[__REINT1_71945]] to half*
+// CHECK-NEXT:    [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
+// CHECK-NEXT:    [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71954]], align 16
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_71954]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE58]], i16* [[__REINT1_71955]], align 2
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i16* [[__REINT1_71955]] to half*
+// CHECK-NEXT:    [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
+// CHECK-NEXT:    [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71964]], align 16
+// CHECK-NEXT:    [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_71964]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 6
+// CHECK-NEXT:    store i16 [[VGETQ_LANE68]], i16* [[__REINT1_71965]], align 2
+// CHECK-NEXT:    [[TMP30:%.*]] = bitcast i16* [[__REINT1_71965]] to half*
+// CHECK-NEXT:    [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
+// CHECK-NEXT:    [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
+// CHECK-NEXT:    [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLSL_LOW3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLSL_LOW3_I]]
+//
 float32x4_t test_vfmlslq_laneq_low_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlslq_laneq_low_f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <8 x half> %c, <8 x half> undef, <8 x i32> <i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6, i32 6>
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> [[SHUFFLE]])
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlslq_laneq_low_f16(a, b, c, 6);
 }
 
+// CHECK-LABEL: @test_vfmlslq_laneq_high_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[__REINT_719:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_719:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_7194:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_7195:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71914:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71915:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71924:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71925:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71934:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71935:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71944:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71945:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71954:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71955:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    [[__REINT_71964:%.*]] = alloca <8 x half>, align 16
+// CHECK-NEXT:    [[__REINT1_71965:%.*]] = alloca i16, align 2
+// CHECK-NEXT:    store <8 x half> [[C:%.*]], <8 x half>* [[__REINT_719]], align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half>* [[__REINT_719]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, <8 x i16>* [[TMP0]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE]], i16* [[__REINT1_719]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16* [[__REINT1_719]] to half*
+// CHECK-NEXT:    [[TMP3:%.*]] = load half, half* [[TMP2]], align 2
+// CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <8 x half> undef, half [[TMP3]], i32 0
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_7194]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half>* [[__REINT_7194]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <8 x i16>, <8 x i16>* [[TMP4]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE8:%.*]] = extractelement <8 x i16> [[TMP5]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE8]], i16* [[__REINT1_7195]], align 2
+// CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[__REINT1_7195]] to half*
+// CHECK-NEXT:    [[TMP7:%.*]] = load half, half* [[TMP6]], align 2
+// CHECK-NEXT:    [[VECINIT11:%.*]] = insertelement <8 x half> [[VECINIT]], half [[TMP7]], i32 1
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71914]], align 16
+// CHECK-NEXT:    [[TMP8:%.*]] = bitcast <8 x half>* [[__REINT_71914]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP9:%.*]] = load <8 x i16>, <8 x i16>* [[TMP8]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE18:%.*]] = extractelement <8 x i16> [[TMP9]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE18]], i16* [[__REINT1_71915]], align 2
+// CHECK-NEXT:    [[TMP10:%.*]] = bitcast i16* [[__REINT1_71915]] to half*
+// CHECK-NEXT:    [[TMP11:%.*]] = load half, half* [[TMP10]], align 2
+// CHECK-NEXT:    [[VECINIT21:%.*]] = insertelement <8 x half> [[VECINIT11]], half [[TMP11]], i32 2
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71924]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x half>* [[__REINT_71924]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP13:%.*]] = load <8 x i16>, <8 x i16>* [[TMP12]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE28:%.*]] = extractelement <8 x i16> [[TMP13]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE28]], i16* [[__REINT1_71925]], align 2
+// CHECK-NEXT:    [[TMP14:%.*]] = bitcast i16* [[__REINT1_71925]] to half*
+// CHECK-NEXT:    [[TMP15:%.*]] = load half, half* [[TMP14]], align 2
+// CHECK-NEXT:    [[VECINIT31:%.*]] = insertelement <8 x half> [[VECINIT21]], half [[TMP15]], i32 3
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71934]], align 16
+// CHECK-NEXT:    [[TMP16:%.*]] = bitcast <8 x half>* [[__REINT_71934]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP17:%.*]] = load <8 x i16>, <8 x i16>* [[TMP16]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE38:%.*]] = extractelement <8 x i16> [[TMP17]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE38]], i16* [[__REINT1_71935]], align 2
+// CHECK-NEXT:    [[TMP18:%.*]] = bitcast i16* [[__REINT1_71935]] to half*
+// CHECK-NEXT:    [[TMP19:%.*]] = load half, half* [[TMP18]], align 2
+// CHECK-NEXT:    [[VECINIT41:%.*]] = insertelement <8 x half> [[VECINIT31]], half [[TMP19]], i32 4
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71944]], align 16
+// CHECK-NEXT:    [[TMP20:%.*]] = bitcast <8 x half>* [[__REINT_71944]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP21:%.*]] = load <8 x i16>, <8 x i16>* [[TMP20]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE48:%.*]] = extractelement <8 x i16> [[TMP21]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE48]], i16* [[__REINT1_71945]], align 2
+// CHECK-NEXT:    [[TMP22:%.*]] = bitcast i16* [[__REINT1_71945]] to half*
+// CHECK-NEXT:    [[TMP23:%.*]] = load half, half* [[TMP22]], align 2
+// CHECK-NEXT:    [[VECINIT51:%.*]] = insertelement <8 x half> [[VECINIT41]], half [[TMP23]], i32 5
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71954]], align 16
+// CHECK-NEXT:    [[TMP24:%.*]] = bitcast <8 x half>* [[__REINT_71954]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP25:%.*]] = load <8 x i16>, <8 x i16>* [[TMP24]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE58:%.*]] = extractelement <8 x i16> [[TMP25]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE58]], i16* [[__REINT1_71955]], align 2
+// CHECK-NEXT:    [[TMP26:%.*]] = bitcast i16* [[__REINT1_71955]] to half*
+// CHECK-NEXT:    [[TMP27:%.*]] = load half, half* [[TMP26]], align 2
+// CHECK-NEXT:    [[VECINIT61:%.*]] = insertelement <8 x half> [[VECINIT51]], half [[TMP27]], i32 6
+// CHECK-NEXT:    store <8 x half> [[C]], <8 x half>* [[__REINT_71964]], align 16
+// CHECK-NEXT:    [[TMP28:%.*]] = bitcast <8 x half>* [[__REINT_71964]] to <8 x i16>*
+// CHECK-NEXT:    [[TMP29:%.*]] = load <8 x i16>, <8 x i16>* [[TMP28]], align 16
+// CHECK-NEXT:    [[VGETQ_LANE68:%.*]] = extractelement <8 x i16> [[TMP29]], i32 7
+// CHECK-NEXT:    store i16 [[VGETQ_LANE68]], i16* [[__REINT1_71965]], align 2
+// CHECK-NEXT:    [[TMP30:%.*]] = bitcast i16* [[__REINT1_71965]] to half*
+// CHECK-NEXT:    [[TMP31:%.*]] = load half, half* [[TMP30]], align 2
+// CHECK-NEXT:    [[VECINIT71:%.*]] = insertelement <8 x half> [[VECINIT61]], half [[TMP31]], i32 7
+// CHECK-NEXT:    [[TMP32:%.*]] = bitcast <4 x float> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP33:%.*]] = bitcast <8 x half> [[B:%.*]] to <16 x i8>
+// CHECK-NEXT:    [[TMP34:%.*]] = bitcast <8 x half> [[VECINIT71]] to <16 x i8>
+// CHECK-NEXT:    [[VFMLSL_HIGH3_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> [[A]], <8 x half> [[B]], <8 x half> [[VECINIT71]]) #3
+// CHECK-NEXT:    ret <4 x float> [[VFMLSL_HIGH3_I]]
+//
 float32x4_t test_vfmlslq_laneq_high_f16(float32x4_t a, float16x8_t b, float16x8_t c) {
-// CHECK-LABEL: define <4 x float> @test_vfmlslq_laneq_high_f16(<4 x float> %a, <8 x half> %b, <8 x half> %c)
-// CHECK: [[SHUFFLE:%.*]] = shufflevector <8 x half> %c, <8 x half> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
-// CHECK: [[RESULT:%.*]] = call <4 x float> @llvm.aarch64.neon.fmlsl2.v4f32.v8f16(<4 x float> %a, <8 x half> %b, <8 x half> [[SHUFFLE]])
-// CHECK: ret <4 x float> [[RESULT]]
   return vfmlslq_laneq_high_f16(a, b, c, 7);
 }
