[llvm] r197993 - Add missing pattern matches to support ACLE intrinsics of AArch64 NEON.

Jiangning Liu jiangning.liu at arm.com
Tue Dec 24 17:22:51 PST 2013


Author: jiangning
Date: Tue Dec 24 19:22:51 2013
New Revision: 197993

URL: http://llvm.org/viewvc/llvm-project?rev=197993&view=rev
Log:
Add missing pattern matches to support ACLE intrinsics of AArch64 NEON.
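At the C level these patterns correspond to ACLE intrinsics from <arm_neon.h>; for example, the new fmla-by-lane test mirrors vfmas_laneq_f32. A minimal usage sketch (the wrapper name fma_lane3 is illustrative and not part of this commit):

  #include <arm_neon.h>

  /* Accumulate b * v[3] into acc; on AArch64 this is expected to lower to
     "fmla s<n>, s<m>, v<k>.s[3]", the pattern exercised by the test below. */
  float fma_lane3(float acc, float b, float32x4_t v) {
      return vfmas_laneq_f32(acc, b, v, 3);
  }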

Modified:
    llvm/trunk/test/CodeGen/AArch64/neon-2velem.ll
    llvm/trunk/test/CodeGen/AArch64/neon-scalar-shift.ll
    llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-one.ll

Modified: llvm/trunk/test/CodeGen/AArch64/neon-2velem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-2velem.ll?rev=197993&r1=197992&r2=197993&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/neon-2velem.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/neon-2velem.ll Tue Dec 24 19:22:51 2013
@@ -466,6 +466,49 @@ entry:
   ret <2 x double> %0
 }
 
+define float @test_vfmas_laneq_f32(float %a, float %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmas_laneq_f32
+; CHECK: fmla {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+entry:
+  %extract = extractelement <4 x float> %v, i32 3
+  %0 = tail call float @llvm.fma.f32(float %b, float %extract, float %a)
+  ret float %0
+}
+
+declare float @llvm.fma.f32(float, float, float)
+
+define double @test_vfmsd_lane_f64(double %a, double %b, <1 x double> %v) {
+; CHECK-LABEL: test_vfmsd_lane_f64
+; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[0]
+entry:
+  %extract.rhs = extractelement <1 x double> %v, i32 0
+  %extract = fsub double -0.000000e+00, %extract.rhs
+  %0 = tail call double @llvm.fma.f64(double %b, double %extract, double %a)
+  ret double %0
+}
+
+declare double @llvm.fma.f64(double, double, double)
+
+define float @test_vfmss_laneq_f32(float %a, float %b, <4 x float> %v) {
+; CHECK-LABEL: test_vfmss_laneq_f32
+; CHECK: fmls {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
+entry:
+  %extract.rhs = extractelement <4 x float> %v, i32 3
+  %extract = fsub float -0.000000e+00, %extract.rhs
+  %0 = tail call float @llvm.fma.f32(float %b, float %extract, float %a)
+  ret float %0
+}
+
+define double @test_vfmsd_laneq_f64(double %a, double %b, <2 x double> %v) {
+; CHECK-LABEL: test_vfmsd_laneq_f64
+; CHECK: fmls {{d[0-9]+}}, {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+entry:
+  %extract.rhs = extractelement <2 x double> %v, i32 1
+  %extract = fsub double -0.000000e+00, %extract.rhs
+  %0 = tail call double @llvm.fma.f64(double %b, double %extract, double %a)
+  ret double %0
+}
+
 define <4 x i32> @test_vmlal_lane_s16(<4 x i32> %a, <4 x i16> %b, <4 x i16> %v) {
 ; CHECK: test_vmlal_lane_s16:
 ; CHECK: mlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]

Modified: llvm/trunk/test/CodeGen/AArch64/neon-scalar-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-scalar-shift.ll?rev=197993&r1=197992&r2=197993&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/neon-scalar-shift.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/neon-scalar-shift.ll Tue Dec 24 19:22:51 2013
@@ -35,4 +35,202 @@ define <1 x i64> @test_sshl_v1i64_aarch6
   ret <1 x i64> %tmp1
 }
 
+define <1 x i64> @test_vtst_s64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-LABEL: test_vtst_s64
+; CHECK: cmtst {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+  %0 = and <1 x i64> %a, %b
+  %1 = icmp ne <1 x i64> %0, zeroinitializer
+  %vtst.i = sext <1 x i1> %1 to <1 x i64>
+  ret <1 x i64> %vtst.i
+}
+
+define <1 x i64> @test_vtst_u64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-LABEL: test_vtst_u64
+; CHECK: cmtst {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+  %0 = and <1 x i64> %a, %b
+  %1 = icmp ne <1 x i64> %0, zeroinitializer
+  %vtst.i = sext <1 x i1> %1 to <1 x i64>
+  ret <1 x i64> %vtst.i
+}
+
+define <1 x i64> @test_vsli_n_p64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-LABEL: test_vsli_n_p64
+; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #0
+entry:
+  %vsli_n2 = tail call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %a, <1 x i64> %b, i32 0)
+  ret <1 x i64> %vsli_n2
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64>, <1 x i64>, i32)
+
+define <2 x i64> @test_vsliq_n_p64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test_vsliq_n_p64
+; CHECK: sli {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
+entry:
+  %vsli_n2 = tail call <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64> %a, <2 x i64> %b, i32 0)
+  ret <2 x i64> %vsli_n2
+}
+
+declare <2 x i64> @llvm.aarch64.neon.vsli.v2i64(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i32> @test_vrsqrte_u32(<2 x i32> %a) {
+; CHECK-LABEL: test_vrsqrte_u32
+; CHECK: ursqrte {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
+entry:
+  %vrsqrte1.i = tail call <2 x i32> @llvm.arm.neon.vrsqrte.v2i32(<2 x i32> %a)
+  ret <2 x i32> %vrsqrte1.i
+}
+
+define <4 x i32> @test_vrsqrteq_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_vrsqrteq_u32
+; CHECK: ursqrte {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
+entry:
+  %vrsqrte1.i = tail call <4 x i32> @llvm.arm.neon.vrsqrte.v4i32(<4 x i32> %a)
+  ret <4 x i32> %vrsqrte1.i
+}
+
+define <8 x i8> @test_vqshl_n_s8(<8 x i8> %a) {
+; CHECK-LABEL: test_vqshl_n_s8
+; CHECK: sqshl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+entry:
+  %vqshl_n = tail call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %a, <8 x i8> zeroinitializer)
+  ret <8 x i8> %vqshl_n
+}
+
+declare <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8>, <8 x i8>)
+
+define <16 x i8> @test_vqshlq_n_s8(<16 x i8> %a) {
+; CHECK-LABEL: test_vqshlq_n_s8
+; CHECK: sqshl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
+entry:
+  %vqshl_n = tail call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %a, <16 x i8> zeroinitializer)
+  ret <16 x i8> %vqshl_n
+}
+
+declare <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8>, <16 x i8>)
+
+define <4 x i16> @test_vqshl_n_s16(<4 x i16> %a) {
+; CHECK-LABEL: test_vqshl_n_s16
+; CHECK: sqshl {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
+entry:
+  %vqshl_n1 = tail call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %a, <4 x i16> zeroinitializer)
+  ret <4 x i16> %vqshl_n1
+}
+
+declare <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16>, <4 x i16>)
+
+define <8 x i16> @test_vqshlq_n_s16(<8 x i16> %a) {
+; CHECK-LABEL: test_vqshlq_n_s16
+; CHECK: sqshl {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
+entry:
+  %vqshl_n1 = tail call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %a, <8 x i16> zeroinitializer)
+  ret <8 x i16> %vqshl_n1
+}
+
+declare <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16>, <8 x i16>)
+
+define <2 x i32> @test_vqshl_n_s32(<2 x i32> %a) {
+; CHECK-LABEL: test_vqshl_n_s32
+; CHECK: sqshl {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
+entry:
+  %vqshl_n1 = tail call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %a, <2 x i32> zeroinitializer)
+  ret <2 x i32> %vqshl_n1
+}
+
+declare <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32>, <2 x i32>)
+
+define <4 x i32> @test_vqshlq_n_s32(<4 x i32> %a) {
+; CHECK-LABEL: test_vqshlq_n_s32
+; CHECK: sqshl {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
+entry:
+  %vqshl_n1 = tail call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %a, <4 x i32> zeroinitializer)
+  ret <4 x i32> %vqshl_n1
+}
+
+declare <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @test_vqshlq_n_s64(<2 x i64> %a) {
+; CHECK-LABEL: test_vqshlq_n_s64
+; CHECK: sqshl {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
+entry:
+  %vqshl_n1 = tail call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %a, <2 x i64> zeroinitializer)
+  ret <2 x i64> %vqshl_n1
+}
+
+declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>)
+
+define <8 x i8> @test_vqshl_n_u8(<8 x i8> %a) {
+; CHECK-LABEL: test_vqshl_n_u8
+; CHECK: uqshl {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0
+entry:
+  %vqshl_n = tail call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %a, <8 x i8> zeroinitializer)
+  ret <8 x i8> %vqshl_n
+}
+
+declare <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8>, <8 x i8>)
+
+define <16 x i8> @test_vqshlq_n_u8(<16 x i8> %a) {
+; CHECK-LABEL: test_vqshlq_n_u8
+; CHECK: uqshl {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0
+entry:
+  %vqshl_n = tail call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %a, <16 x i8> zeroinitializer)
+  ret <16 x i8> %vqshl_n
+}
+
+declare <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8>, <16 x i8>)
+
+define <4 x i16> @test_vqshl_n_u16(<4 x i16> %a) {
+; CHECK-LABEL: test_vqshl_n_u16
+; CHECK: uqshl {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, #0
+entry:
+  %vqshl_n1 = tail call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %a, <4 x i16> zeroinitializer)
+  ret <4 x i16> %vqshl_n1
+}
+
+declare <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16>, <4 x i16>)
+
+define <8 x i16> @test_vqshlq_n_u16(<8 x i16> %a) {
+; CHECK-LABEL: test_vqshlq_n_u16
+; CHECK: uqshl {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, #0
+entry:
+  %vqshl_n1 = tail call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %a, <8 x i16> zeroinitializer)
+  ret <8 x i16> %vqshl_n1
+}
+
+declare <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16>, <8 x i16>)
+
+define <2 x i32> @test_vqshl_n_u32(<2 x i32> %a) {
+; CHECK-LABEL: test_vqshl_n_u32
+; CHECK: uqshl {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, #0
+entry:
+  %vqshl_n1 = tail call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %a, <2 x i32> zeroinitializer)
+  ret <2 x i32> %vqshl_n1
+}
+
+declare <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32>, <2 x i32>)
+
+define <4 x i32> @test_vqshlq_n_u32(<4 x i32> %a) {
+; CHECK-LABEL: test_vqshlq_n_u32
+; CHECK: uqshl {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, #0
+entry:
+  %vqshl_n1 = tail call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %a, <4 x i32> zeroinitializer)
+  ret <4 x i32> %vqshl_n1
+}
+
+declare <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32>, <4 x i32>)
+
+define <2 x i64> @test_vqshlq_n_u64(<2 x i64> %a) {
+; CHECK-LABEL: test_vqshlq_n_u64
+; CHECK: uqshl {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, #0
+entry:
+  %vqshl_n1 = tail call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %a, <2 x i64> zeroinitializer)
+  ret <2 x i64> %vqshl_n1
+}
+
+declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>)
+
+declare <4 x i32> @llvm.arm.neon.vrsqrte.v4i32(<4 x i32>)
 
+declare <2 x i32> @llvm.arm.neon.vrsqrte.v2i32(<2 x i32>)

Modified: llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-one.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-one.ll?rev=197993&r1=197992&r2=197993&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-one.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-one.ll Tue Dec 24 19:22:51 2013
@@ -1,5 +1,8 @@
 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
 
+%struct.uint8x16x2_t = type { [2 x <16 x i8>] }
+%struct.poly8x16x2_t = type { [2 x <16 x i8>] }
+%struct.uint8x16x3_t = type { [3 x <16 x i8>] }
 %struct.int8x16x2_t = type { [2 x <16 x i8>] }
 %struct.int16x8x2_t = type { [2 x <8 x i16>] }
 %struct.int32x4x2_t = type { [2 x <4 x i32>] }
@@ -2216,4 +2219,81 @@ declare void @llvm.arm.neon.vst4lane.v4i
 declare void @llvm.arm.neon.vst4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32, i32)
 declare void @llvm.arm.neon.vst4lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
 declare void @llvm.arm.neon.vst4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32, i32)
-declare void @llvm.arm.neon.vst4lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32, i32)
\ No newline at end of file
+declare void @llvm.arm.neon.vst4lane.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32, i32)
+
+define %struct.int8x16x2_t @test_vld2q_lane_s8(i8* readonly %ptr, [2 x <16 x i8>] %src.coerce) {
+; CHECK-LABEL: test_vld2q_lane_s8
+; CHECK: ld2 {{{v[0-9]+}}.b, {{v[0-9]+}}.b}[15], [x0]
+entry:
+  %src.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %src.coerce, 0
+  %src.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %src.coerce, 1
+  %vld2_lane = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8* %ptr, <16 x i8> %src.coerce.fca.0.extract, <16 x i8> %src.coerce.fca.1.extract, i32 15, i32 1)
+  %vld2_lane.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2_lane, 0
+  %vld2_lane.fca.1.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2_lane, 1
+  %.fca.0.0.insert = insertvalue %struct.int8x16x2_t undef, <16 x i8> %vld2_lane.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int8x16x2_t %.fca.0.0.insert, <16 x i8> %vld2_lane.fca.1.extract, 0, 1
+  ret %struct.int8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.uint8x16x2_t @test_vld2q_lane_u8(i8* readonly %ptr, [2 x <16 x i8>] %src.coerce) {
+; CHECK-LABEL: test_vld2q_lane_u8
+; CHECK: ld2 {{{v[0-9]+}}.b, {{v[0-9]+}}.b}[15], [x0]
+entry:
+  %src.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %src.coerce, 0
+  %src.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %src.coerce, 1
+  %vld2_lane = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8* %ptr, <16 x i8> %src.coerce.fca.0.extract, <16 x i8> %src.coerce.fca.1.extract, i32 15, i32 1)
+  %vld2_lane.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2_lane, 0
+  %vld2_lane.fca.1.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2_lane, 1
+  %.fca.0.0.insert = insertvalue %struct.uint8x16x2_t undef, <16 x i8> %vld2_lane.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.uint8x16x2_t %.fca.0.0.insert, <16 x i8> %vld2_lane.fca.1.extract, 0, 1
+  ret %struct.uint8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.poly8x16x2_t @test_vld2q_lane_p8(i8* readonly %ptr, [2 x <16 x i8>] %src.coerce) {
+; CHECK-LABEL: test_vld2q_lane_p8
+; CHECK: ld2 {{{v[0-9]+}}.b, {{v[0-9]+}}.b}[15], [x0]
+entry:
+  %src.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %src.coerce, 0
+  %src.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %src.coerce, 1
+  %vld2_lane = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2lane.v16i8(i8* %ptr, <16 x i8> %src.coerce.fca.0.extract, <16 x i8> %src.coerce.fca.1.extract, i32 15, i32 1)
+  %vld2_lane.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2_lane, 0
+  %vld2_lane.fca.1.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2_lane, 1
+  %.fca.0.0.insert = insertvalue %struct.poly8x16x2_t undef, <16 x i8> %vld2_lane.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.poly8x16x2_t %.fca.0.0.insert, <16 x i8> %vld2_lane.fca.1.extract, 0, 1
+  ret %struct.poly8x16x2_t %.fca.0.1.insert
+}
+
+define %struct.int8x16x3_t @test_vld3q_lane_s8(i8* readonly %ptr, [3 x <16 x i8>] %src.coerce) {
+; CHECK-LABEL: test_vld3q_lane_s8
+; CHECK: ld3 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[15], [x0]
+entry:
+  %src.coerce.fca.0.extract = extractvalue [3 x <16 x i8>] %src.coerce, 0
+  %src.coerce.fca.1.extract = extractvalue [3 x <16 x i8>] %src.coerce, 1
+  %src.coerce.fca.2.extract = extractvalue [3 x <16 x i8>] %src.coerce, 2
+  %vld3_lane = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3lane.v16i8(i8* %ptr, <16 x i8> %src.coerce.fca.0.extract, <16 x i8> %src.coerce.fca.1.extract, <16 x i8> %src.coerce.fca.2.extract, i32 15, i32 1)
+  %vld3_lane.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 0
+  %vld3_lane.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 1
+  %vld3_lane.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 2
+  %.fca.0.0.insert = insertvalue %struct.int8x16x3_t undef, <16 x i8> %vld3_lane.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.int8x16x3_t %.fca.0.0.insert, <16 x i8> %vld3_lane.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.int8x16x3_t %.fca.0.1.insert, <16 x i8> %vld3_lane.fca.2.extract, 0, 2
+  ret %struct.int8x16x3_t %.fca.0.2.insert
+}
+
+define %struct.uint8x16x3_t @test_vld3q_lane_u8(i8* readonly %ptr, [3 x <16 x i8>] %src.coerce) {
+; CHECK-LABEL: test_vld3q_lane_u8
+; CHECK: ld3 {{{v[0-9]+}}.b, {{v[0-9]+}}.b, {{v[0-9]+}}.b}[15], [x0]
+entry:
+  %src.coerce.fca.0.extract = extractvalue [3 x <16 x i8>] %src.coerce, 0
+  %src.coerce.fca.1.extract = extractvalue [3 x <16 x i8>] %src.coerce, 1
+  %src.coerce.fca.2.extract = extractvalue [3 x <16 x i8>] %src.coerce, 2
+  %vld3_lane = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3lane.v16i8(i8* %ptr, <16 x i8> %src.coerce.fca.0.extract, <16 x i8> %src.coerce.fca.1.extract, <16 x i8> %src.coerce.fca.2.extract, i32 15, i32 1)
+  %vld3_lane.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 0
+  %vld3_lane.fca.1.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 1
+  %vld3_lane.fca.2.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3_lane, 2
+  %.fca.0.0.insert = insertvalue %struct.uint8x16x3_t undef, <16 x i8> %vld3_lane.fca.0.extract, 0, 0
+  %.fca.0.1.insert = insertvalue %struct.uint8x16x3_t %.fca.0.0.insert, <16 x i8> %vld3_lane.fca.1.extract, 0, 1
+  %.fca.0.2.insert = insertvalue %struct.uint8x16x3_t %.fca.0.1.insert, <16 x i8> %vld3_lane.fca.2.extract, 0, 2
+  ret %struct.uint8x16x3_t %.fca.0.2.insert
+}
+




