[llvm] r194990 - Implement the newly added ACLE functions for ld1/st1 with 2/3/4 vectors.

Chad Rosier mcrosier at codeaurora.org
Tue Nov 19 12:23:41 PST 2013


Great!  Thanks, Hao.

> Hi Chad,
>
> We already have such test cases from r194043 (post-index instructions)
> and r192361 (normal instructions).
> This patch just matches the ACLE intrinsics to the already existing
> instructions.
>
> Thanks,
> -Hao
>
> -----Original Message-----
> From: Chad Rosier [mailto:mcrosier at codeaurora.org]
> Sent: Tuesday, November 19, 2013 1:04 AM
> To: Hao Liu
> Cc: llvm-commits at cs.uiuc.edu
> Subject: Re: [llvm] r194990 - Implement the newly added ACLE functions for
> ld1/st1 with 2/3/4 vectors.
>
> Hi Hao,
> Do we need to add diagnostic tests (i.e.,
> test/MC/AArch64/neon-diagnostics.s) and/or assembler/disassembler tests
> (i.e., test/MC/Disassembler/AArch64/neon-instructions.txt)?
>
>  Chad
>
>> Author: haoliu
>> Date: Mon Nov 18 00:31:53 2013
>> New Revision: 194990
>>
>> URL: http://llvm.org/viewvc/llvm-project?rev=194990&view=rev
>> Log:
>> Implement the newly added ACLE functions for ld1/st1 with 2/3/4 vectors.
>> The functions are like: vst1_s8_x2 ...
>>
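For reference, this is roughly what the new intrinsics look like at the IR
level. A minimal sketch, not taken from the patch: the mangled names are
assumed to follow the existing llvm.arm.neon style, and the trailing i32 is
the alignment argument, matching the IntrinsicsAArch64.td definitions below.

  declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8*, i32)
  declare void @llvm.aarch64.neon.vst1x2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)

  define void @pair_copy(i8* %src, i8* %dst) {
    ; single ld1 with a two-register list: ld1 {v0.8b, v1.8b}, [x0]
    %pair = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8* %src, i32 1)
    %lo = extractvalue { <8 x i8>, <8 x i8> } %pair, 0
    %hi = extractvalue { <8 x i8>, <8 x i8> } %pair, 1
    ; matching st1 with a two-register list: st1 {v0.8b, v1.8b}, [x1]
    call void @llvm.aarch64.neon.vst1x2.v8i8(i8* %dst, <8 x i8> %lo, <8 x i8> %hi, i32 1)
    ret void
  }
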
>> Modified:
>>     llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td
>>     llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
>>     llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
>>     llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
>>     llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td
>>     llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll
>>     llvm/trunk/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll
>>
>> Modified: llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td?rev=194990&r1=194989&r2=194990&view=diff
>> ==============================================================================
>> --- llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td (original)
>> +++ llvm/trunk/include/llvm/IR/IntrinsicsAArch64.td Mon Nov 18 00:31:53 2013
>> @@ -163,6 +163,33 @@ def int_aarch64_neon_vtbx4 :
>>              LLVMMatchType<1>,  LLVMMatchType<1>, LLVMMatchType<0>],
>>              [IntrNoMem]>;
>>
>> +// Vector Load/store
>> +def int_aarch64_neon_vld1x2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
>> +                                        [llvm_ptr_ty, llvm_i32_ty],
>> +                                        [IntrReadArgMem]>;
>> +def int_aarch64_neon_vld1x3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
>> +                                         LLVMMatchType<0>],
>> +                                        [llvm_ptr_ty, llvm_i32_ty],
>> +                                        [IntrReadArgMem]>;
>> +def int_aarch64_neon_vld1x4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
>> +                                         LLVMMatchType<0>, LLVMMatchType<0>],
>> +                                        [llvm_ptr_ty, llvm_i32_ty],
>> +                                        [IntrReadArgMem]>;
>> +
>> +def int_aarch64_neon_vst1x2 : Intrinsic<[],
>> +                                        [llvm_ptr_ty, llvm_anyvector_ty,
>> +                                         LLVMMatchType<0>, llvm_i32_ty],
>> +                                        [IntrReadWriteArgMem]>;
>> +def int_aarch64_neon_vst1x3 : Intrinsic<[],
>> +                                        [llvm_ptr_ty, llvm_anyvector_ty,
>> +                                         LLVMMatchType<0>, LLVMMatchType<0>,
>> +                                         llvm_i32_ty], [IntrReadWriteArgMem]>;
>> +def int_aarch64_neon_vst1x4 : Intrinsic<[],
>> +                                        [llvm_ptr_ty, llvm_anyvector_ty,
>> +                                         LLVMMatchType<0>, LLVMMatchType<0>,
>> +                                         LLVMMatchType<0>, llvm_i32_ty],
>> +                                        [IntrReadWriteArgMem]>;
>> +
>>  // Scalar Add
>>  def int_aarch64_neon_vaddds :
>>    Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
>>
>> Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp?rev=194990&r1=194989&r2=194990&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (original)
>> +++ llvm/trunk/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp Mon Nov 18 00:31:53 2013
>> @@ -483,7 +483,6 @@ static unsigned getVLDSTRegisterUpdateOp
>>    case AArch64::LD2WB_8B_fixed: return AArch64::LD2WB_8B_register;
>>    case AArch64::LD2WB_4H_fixed: return AArch64::LD2WB_4H_register;
>>    case AArch64::LD2WB_2S_fixed: return AArch64::LD2WB_2S_register;
>> -  case AArch64::LD1WB2V_1D_fixed: return AArch64::LD1WB2V_1D_register;
>>    case AArch64::LD2WB_16B_fixed: return AArch64::LD2WB_16B_register;
>>    case AArch64::LD2WB_8H_fixed: return AArch64::LD2WB_8H_register;
>>    case AArch64::LD2WB_4S_fixed: return AArch64::LD2WB_4S_register;
>> @@ -492,7 +491,6 @@ static unsigned getVLDSTRegisterUpdateOp
>>    case AArch64::LD3WB_8B_fixed: return AArch64::LD3WB_8B_register;
>>    case AArch64::LD3WB_4H_fixed: return AArch64::LD3WB_4H_register;
>>    case AArch64::LD3WB_2S_fixed: return AArch64::LD3WB_2S_register;
>> -  case AArch64::LD1WB3V_1D_fixed: return AArch64::LD1WB3V_1D_register;
>>    case AArch64::LD3WB_16B_fixed: return AArch64::LD3WB_16B_register;
>>    case AArch64::LD3WB_8H_fixed: return AArch64::LD3WB_8H_register;
>>    case AArch64::LD3WB_4S_fixed: return AArch64::LD3WB_4S_register;
>> @@ -501,12 +499,38 @@ static unsigned getVLDSTRegisterUpdateOp
>>    case AArch64::LD4WB_8B_fixed: return AArch64::LD4WB_8B_register;
>>    case AArch64::LD4WB_4H_fixed: return AArch64::LD4WB_4H_register;
>>    case AArch64::LD4WB_2S_fixed: return AArch64::LD4WB_2S_register;
>> -  case AArch64::LD1WB4V_1D_fixed: return AArch64::LD1WB4V_1D_register;
>>    case AArch64::LD4WB_16B_fixed: return AArch64::LD4WB_16B_register;
>>    case AArch64::LD4WB_8H_fixed: return AArch64::LD4WB_8H_register;
>>    case AArch64::LD4WB_4S_fixed: return AArch64::LD4WB_4S_register;
>>    case AArch64::LD4WB_2D_fixed: return AArch64::LD4WB_2D_register;
>>
>> +  case AArch64::LD1x2WB_8B_fixed: return AArch64::LD1x2WB_8B_register;
>> +  case AArch64::LD1x2WB_4H_fixed: return AArch64::LD1x2WB_4H_register;
>> +  case AArch64::LD1x2WB_2S_fixed: return AArch64::LD1x2WB_2S_register;
>> +  case AArch64::LD1x2WB_1D_fixed: return AArch64::LD1x2WB_1D_register;
>> +  case AArch64::LD1x2WB_16B_fixed: return AArch64::LD1x2WB_16B_register;
>> +  case AArch64::LD1x2WB_8H_fixed: return AArch64::LD1x2WB_8H_register;
>> +  case AArch64::LD1x2WB_4S_fixed: return AArch64::LD1x2WB_4S_register;
>> +  case AArch64::LD1x2WB_2D_fixed: return AArch64::LD1x2WB_2D_register;
>> +
>> +  case AArch64::LD1x3WB_8B_fixed: return AArch64::LD1x3WB_8B_register;
>> +  case AArch64::LD1x3WB_4H_fixed: return AArch64::LD1x3WB_4H_register;
>> +  case AArch64::LD1x3WB_2S_fixed: return AArch64::LD1x3WB_2S_register;
>> +  case AArch64::LD1x3WB_1D_fixed: return AArch64::LD1x3WB_1D_register;
>> +  case AArch64::LD1x3WB_16B_fixed: return AArch64::LD1x3WB_16B_register;
>> +  case AArch64::LD1x3WB_8H_fixed: return AArch64::LD1x3WB_8H_register;
>> +  case AArch64::LD1x3WB_4S_fixed: return AArch64::LD1x3WB_4S_register;
>> +  case AArch64::LD1x3WB_2D_fixed: return AArch64::LD1x3WB_2D_register;
>> +
>> +  case AArch64::LD1x4WB_8B_fixed: return AArch64::LD1x4WB_8B_register;
>> +  case AArch64::LD1x4WB_4H_fixed: return AArch64::LD1x4WB_4H_register;
>> +  case AArch64::LD1x4WB_2S_fixed: return AArch64::LD1x4WB_2S_register;
>> +  case AArch64::LD1x4WB_1D_fixed: return AArch64::LD1x4WB_1D_register;
>> +  case AArch64::LD1x4WB_16B_fixed: return AArch64::LD1x4WB_16B_register;
>> +  case AArch64::LD1x4WB_8H_fixed: return AArch64::LD1x4WB_8H_register;
>> +  case AArch64::LD1x4WB_4S_fixed: return AArch64::LD1x4WB_4S_register;
>> +  case AArch64::LD1x4WB_2D_fixed: return AArch64::LD1x4WB_2D_register;
>> +
>>    case AArch64::ST1WB_8B_fixed: return AArch64::ST1WB_8B_register;
>>    case AArch64::ST1WB_4H_fixed: return AArch64::ST1WB_4H_register;
>>    case AArch64::ST1WB_2S_fixed: return AArch64::ST1WB_2S_register;
>> @@ -519,7 +543,6 @@ static unsigned getVLDSTRegisterUpdateOp
>>    case AArch64::ST2WB_8B_fixed: return AArch64::ST2WB_8B_register;
>>    case AArch64::ST2WB_4H_fixed: return AArch64::ST2WB_4H_register;
>>    case AArch64::ST2WB_2S_fixed: return AArch64::ST2WB_2S_register;
>> -  case AArch64::ST1WB2V_1D_fixed: return AArch64::ST1WB2V_1D_register;
>>    case AArch64::ST2WB_16B_fixed: return AArch64::ST2WB_16B_register;
>>    case AArch64::ST2WB_8H_fixed: return AArch64::ST2WB_8H_register;
>>    case AArch64::ST2WB_4S_fixed: return AArch64::ST2WB_4S_register;
>> @@ -528,7 +551,6 @@ static unsigned getVLDSTRegisterUpdateOp
>>    case AArch64::ST3WB_8B_fixed: return AArch64::ST3WB_8B_register;
>>    case AArch64::ST3WB_4H_fixed: return AArch64::ST3WB_4H_register;
>>    case AArch64::ST3WB_2S_fixed: return AArch64::ST3WB_2S_register;
>> -  case AArch64::ST1WB3V_1D_fixed: return AArch64::ST1WB3V_1D_register;
>>    case AArch64::ST3WB_16B_fixed: return AArch64::ST3WB_16B_register;
>>    case AArch64::ST3WB_8H_fixed: return AArch64::ST3WB_8H_register;
>>    case AArch64::ST3WB_4S_fixed: return AArch64::ST3WB_4S_register;
>> @@ -537,11 +559,37 @@ static unsigned getVLDSTRegisterUpdateOp
>>    case AArch64::ST4WB_8B_fixed: return AArch64::ST4WB_8B_register;
>>    case AArch64::ST4WB_4H_fixed: return AArch64::ST4WB_4H_register;
>>    case AArch64::ST4WB_2S_fixed: return AArch64::ST4WB_2S_register;
>> -  case AArch64::ST1WB4V_1D_fixed: return AArch64::ST1WB4V_1D_register;
>>    case AArch64::ST4WB_16B_fixed: return AArch64::ST4WB_16B_register;
>>    case AArch64::ST4WB_8H_fixed: return AArch64::ST4WB_8H_register;
>>    case AArch64::ST4WB_4S_fixed: return AArch64::ST4WB_4S_register;
>>    case AArch64::ST4WB_2D_fixed: return AArch64::ST4WB_2D_register;
>> +
>> +  case AArch64::ST1x2WB_8B_fixed: return AArch64::ST1x2WB_8B_register;
>> +  case AArch64::ST1x2WB_4H_fixed: return AArch64::ST1x2WB_4H_register;
>> +  case AArch64::ST1x2WB_2S_fixed: return AArch64::ST1x2WB_2S_register;
>> +  case AArch64::ST1x2WB_1D_fixed: return AArch64::ST1x2WB_1D_register;
>> +  case AArch64::ST1x2WB_16B_fixed: return AArch64::ST1x2WB_16B_register;
>> +  case AArch64::ST1x2WB_8H_fixed: return AArch64::ST1x2WB_8H_register;
>> +  case AArch64::ST1x2WB_4S_fixed: return AArch64::ST1x2WB_4S_register;
>> +  case AArch64::ST1x2WB_2D_fixed: return AArch64::ST1x2WB_2D_register;
>> +
>> +  case AArch64::ST1x3WB_8B_fixed: return AArch64::ST1x3WB_8B_register;
>> +  case AArch64::ST1x3WB_4H_fixed: return AArch64::ST1x3WB_4H_register;
>> +  case AArch64::ST1x3WB_2S_fixed: return AArch64::ST1x3WB_2S_register;
>> +  case AArch64::ST1x3WB_1D_fixed: return AArch64::ST1x3WB_1D_register;
>> +  case AArch64::ST1x3WB_16B_fixed: return AArch64::ST1x3WB_16B_register;
>> +  case AArch64::ST1x3WB_8H_fixed: return AArch64::ST1x3WB_8H_register;
>> +  case AArch64::ST1x3WB_4S_fixed: return AArch64::ST1x3WB_4S_register;
>> +  case AArch64::ST1x3WB_2D_fixed: return AArch64::ST1x3WB_2D_register;
>> +
>> +  case AArch64::ST1x4WB_8B_fixed: return AArch64::ST1x4WB_8B_register;
>> +  case AArch64::ST1x4WB_4H_fixed: return AArch64::ST1x4WB_4H_register;
>> +  case AArch64::ST1x4WB_2S_fixed: return AArch64::ST1x4WB_2S_register;
>> +  case AArch64::ST1x4WB_1D_fixed: return AArch64::ST1x4WB_1D_register;
>> +  case AArch64::ST1x4WB_16B_fixed: return AArch64::ST1x4WB_16B_register;
>> +  case AArch64::ST1x4WB_8H_fixed: return AArch64::ST1x4WB_8H_register;
>> +  case AArch64::ST1x4WB_4S_fixed: return AArch64::ST1x4WB_4S_register;
>> +  case AArch64::ST1x4WB_2D_fixed: return AArch64::ST1x4WB_2D_register;
>>    }
>>    return Opc; // If not one we handle, return it unchanged.
>>  }
>> @@ -912,7 +960,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNo
>>    case AArch64ISD::NEON_LD2_UPD: {
>>      static const uint16_t Opcodes[] = {
>>        AArch64::LD2WB_8B_fixed,  AArch64::LD2WB_4H_fixed,
>> -      AArch64::LD2WB_2S_fixed,  AArch64::LD1WB2V_1D_fixed,
>> +      AArch64::LD2WB_2S_fixed,  AArch64::LD1x2WB_1D_fixed,
>>        AArch64::LD2WB_16B_fixed, AArch64::LD2WB_8H_fixed,
>>        AArch64::LD2WB_4S_fixed,  AArch64::LD2WB_2D_fixed
>>      };
>> @@ -921,7 +969,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNo
>>    case AArch64ISD::NEON_LD3_UPD: {
>>      static const uint16_t Opcodes[] = {
>>        AArch64::LD3WB_8B_fixed,  AArch64::LD3WB_4H_fixed,
>> -      AArch64::LD3WB_2S_fixed,  AArch64::LD1WB3V_1D_fixed,
>> +      AArch64::LD3WB_2S_fixed,  AArch64::LD1x3WB_1D_fixed,
>>        AArch64::LD3WB_16B_fixed, AArch64::LD3WB_8H_fixed,
>>        AArch64::LD3WB_4S_fixed,  AArch64::LD3WB_2D_fixed
>>      };
>> @@ -930,12 +978,39 @@ SDNode *AArch64DAGToDAGISel::Select(SDNo
>>    case AArch64ISD::NEON_LD4_UPD: {
>>      static const uint16_t Opcodes[] = {
>>        AArch64::LD4WB_8B_fixed,  AArch64::LD4WB_4H_fixed,
>> -      AArch64::LD4WB_2S_fixed,  AArch64::LD1WB4V_1D_fixed,
>> +      AArch64::LD4WB_2S_fixed,  AArch64::LD1x4WB_1D_fixed,
>>        AArch64::LD4WB_16B_fixed, AArch64::LD4WB_8H_fixed,
>>        AArch64::LD4WB_4S_fixed,  AArch64::LD4WB_2D_fixed
>>      };
>>      return SelectVLD(Node, 4, true, Opcodes);
>>    }
>> +  case AArch64ISD::NEON_LD1x2_UPD: {
>> +    static const uint16_t Opcodes[] = {
>> +      AArch64::LD1x2WB_8B_fixed,  AArch64::LD1x2WB_4H_fixed,
>> +      AArch64::LD1x2WB_2S_fixed,  AArch64::LD1x2WB_1D_fixed,
>> +      AArch64::LD1x2WB_16B_fixed, AArch64::LD1x2WB_8H_fixed,
>> +      AArch64::LD1x2WB_4S_fixed,  AArch64::LD1x2WB_2D_fixed
>> +    };
>> +    return SelectVLD(Node, 2, true, Opcodes);
>> +  }
>> +  case AArch64ISD::NEON_LD1x3_UPD: {
>> +    static const uint16_t Opcodes[] = {
>> +      AArch64::LD1x3WB_8B_fixed,  AArch64::LD1x3WB_4H_fixed,
>> +      AArch64::LD1x3WB_2S_fixed,  AArch64::LD1x3WB_1D_fixed,
>> +      AArch64::LD1x3WB_16B_fixed, AArch64::LD1x3WB_8H_fixed,
>> +      AArch64::LD1x3WB_4S_fixed,  AArch64::LD1x3WB_2D_fixed
>> +    };
>> +    return SelectVLD(Node, 3, true, Opcodes);
>> +  }
>> +  case AArch64ISD::NEON_LD1x4_UPD: {
>> +    static const uint16_t Opcodes[] = {
>> +      AArch64::LD1x4WB_8B_fixed,  AArch64::LD1x4WB_4H_fixed,
>> +      AArch64::LD1x4WB_2S_fixed,  AArch64::LD1x4WB_1D_fixed,
>> +      AArch64::LD1x4WB_16B_fixed, AArch64::LD1x4WB_8H_fixed,
>> +      AArch64::LD1x4WB_4S_fixed,  AArch64::LD1x4WB_2D_fixed
>> +    };
>> +    return SelectVLD(Node, 4, true, Opcodes);
>> +  }
>>    case AArch64ISD::NEON_ST1_UPD: {
>>      static const uint16_t Opcodes[] = {
>>        AArch64::ST1WB_8B_fixed,  AArch64::ST1WB_4H_fixed,
>> @@ -948,7 +1023,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNo
>>    case AArch64ISD::NEON_ST2_UPD: {
>>      static const uint16_t Opcodes[] = {
>>        AArch64::ST2WB_8B_fixed,  AArch64::ST2WB_4H_fixed,
>> -      AArch64::ST2WB_2S_fixed,  AArch64::ST1WB2V_1D_fixed,
>> +      AArch64::ST2WB_2S_fixed,  AArch64::ST1x2WB_1D_fixed,
>>        AArch64::ST2WB_16B_fixed, AArch64::ST2WB_8H_fixed,
>>        AArch64::ST2WB_4S_fixed,  AArch64::ST2WB_2D_fixed
>>      };
>> @@ -957,7 +1032,7 @@ SDNode *AArch64DAGToDAGISel::Select(SDNo
>>    case AArch64ISD::NEON_ST3_UPD: {
>>      static const uint16_t Opcodes[] = {
>>        AArch64::ST3WB_8B_fixed,  AArch64::ST3WB_4H_fixed,
>> -      AArch64::ST3WB_2S_fixed,  AArch64::ST1WB3V_1D_fixed,
>> +      AArch64::ST3WB_2S_fixed,  AArch64::ST1x3WB_1D_fixed,
>>        AArch64::ST3WB_16B_fixed, AArch64::ST3WB_8H_fixed,
>>        AArch64::ST3WB_4S_fixed,  AArch64::ST3WB_2D_fixed
>>      };
>> @@ -966,12 +1041,39 @@ SDNode *AArch64DAGToDAGISel::Select(SDNo
>>    case AArch64ISD::NEON_ST4_UPD: {
>>      static const uint16_t Opcodes[] = {
>>        AArch64::ST4WB_8B_fixed,  AArch64::ST4WB_4H_fixed,
>> -      AArch64::ST4WB_2S_fixed,  AArch64::ST1WB4V_1D_fixed,
>> +      AArch64::ST4WB_2S_fixed,  AArch64::ST1x4WB_1D_fixed,
>>        AArch64::ST4WB_16B_fixed, AArch64::ST4WB_8H_fixed,
>>        AArch64::ST4WB_4S_fixed,  AArch64::ST4WB_2D_fixed
>>      };
>>      return SelectVST(Node, 4, true, Opcodes);
>>    }
>> +  case AArch64ISD::NEON_ST1x2_UPD: {
>> +    static const uint16_t Opcodes[] = {
>> +      AArch64::ST1x2WB_8B_fixed,  AArch64::ST1x2WB_4H_fixed,
>> +      AArch64::ST1x2WB_2S_fixed,  AArch64::ST1x2WB_1D_fixed,
>> +      AArch64::ST1x2WB_16B_fixed, AArch64::ST1x2WB_8H_fixed,
>> +      AArch64::ST1x2WB_4S_fixed,  AArch64::ST1x2WB_2D_fixed
>> +    };
>> +    return SelectVST(Node, 2, true, Opcodes);
>> +  }
>> +  case AArch64ISD::NEON_ST1x3_UPD: {
>> +    static const uint16_t Opcodes[] = {
>> +      AArch64::ST1x3WB_8B_fixed,  AArch64::ST1x3WB_4H_fixed,
>> +      AArch64::ST1x3WB_2S_fixed,  AArch64::ST1x3WB_1D_fixed,
>> +      AArch64::ST1x3WB_16B_fixed, AArch64::ST1x3WB_8H_fixed,
>> +      AArch64::ST1x3WB_4S_fixed,  AArch64::ST1x3WB_2D_fixed
>> +    };
>> +    return SelectVST(Node, 3, true, Opcodes);
>> +  }
>> +  case AArch64ISD::NEON_ST1x4_UPD: {
>> +    static const uint16_t Opcodes[] = {
>> +      AArch64::ST1x4WB_8B_fixed,  AArch64::ST1x4WB_4H_fixed,
>> +      AArch64::ST1x4WB_2S_fixed,  AArch64::ST1x4WB_1D_fixed,
>> +      AArch64::ST1x4WB_16B_fixed, AArch64::ST1x4WB_8H_fixed,
>> +      AArch64::ST1x4WB_4S_fixed,  AArch64::ST1x4WB_2D_fixed
>> +    };
>> +    return SelectVST(Node, 4, true, Opcodes);
>> +  }
>>    case ISD::INTRINSIC_WO_CHAIN: {
>>      unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
>>      bool IsExt = false;
>> @@ -1013,25 +1115,49 @@ SDNode *AArch64DAGToDAGISel::Select(SDNo
>>      }
>>      case Intrinsic::arm_neon_vld2: {
>>        static const uint16_t Opcodes[] = { AArch64::LD2_8B,  AArch64::LD2_4H,
>> -                                          AArch64::LD2_2S,  AArch64::LD1_2V_1D,
>> +                                          AArch64::LD2_2S,  AArch64::LD1x2_1D,
>>                                            AArch64::LD2_16B, AArch64::LD2_8H,
>>                                            AArch64::LD2_4S,  AArch64::LD2_2D };
>>        return SelectVLD(Node, 2, false, Opcodes);
>>      }
>>      case Intrinsic::arm_neon_vld3: {
>>        static const uint16_t Opcodes[] = { AArch64::LD3_8B,  AArch64::LD3_4H,
>> -                                          AArch64::LD3_2S,  AArch64::LD1_3V_1D,
>> +                                          AArch64::LD3_2S,  AArch64::LD1x3_1D,
>>                                            AArch64::LD3_16B, AArch64::LD3_8H,
>>                                            AArch64::LD3_4S,  AArch64::LD3_2D };
>>        return SelectVLD(Node, 3, false, Opcodes);
>>      }
>>      case Intrinsic::arm_neon_vld4: {
>>        static const uint16_t Opcodes[] = { AArch64::LD4_8B,  AArch64::LD4_4H,
>> -                                          AArch64::LD4_2S,  AArch64::LD1_4V_1D,
>> +                                          AArch64::LD4_2S,  AArch64::LD1x4_1D,
>>                                            AArch64::LD4_16B, AArch64::LD4_8H,
>>                                            AArch64::LD4_4S,  AArch64::LD4_2D };
>>        return SelectVLD(Node, 4, false, Opcodes);
>>      }
>> +    case Intrinsic::aarch64_neon_vld1x2: {
>> +      static const uint16_t Opcodes[] = {
>> +        AArch64::LD1x2_8B, AArch64::LD1x2_4H,  AArch64::LD1x2_2S,
>> +        AArch64::LD1x2_1D, AArch64::LD1x2_16B, AArch64::LD1x2_8H,
>> +        AArch64::LD1x2_4S, AArch64::LD1x2_2D
>> +      };
>> +      return SelectVLD(Node, 2, false, Opcodes);
>> +    }
>> +    case Intrinsic::aarch64_neon_vld1x3: {
>> +      static const uint16_t Opcodes[] = {
>> +        AArch64::LD1x3_8B, AArch64::LD1x3_4H,  AArch64::LD1x3_2S,
>> +        AArch64::LD1x3_1D, AArch64::LD1x3_16B, AArch64::LD1x3_8H,
>> +        AArch64::LD1x3_4S, AArch64::LD1x3_2D
>> +      };
>> +      return SelectVLD(Node, 3, false, Opcodes);
>> +    }
>> +    case Intrinsic::aarch64_neon_vld1x4: {
>> +      static const uint16_t Opcodes[] = {
>> +        AArch64::LD1x4_8B, AArch64::LD1x4_4H,  AArch64::LD1x4_2S,
>> +        AArch64::LD1x4_1D, AArch64::LD1x4_16B, AArch64::LD1x4_8H,
>> +        AArch64::LD1x4_4S, AArch64::LD1x4_2D
>> +      };
>> +      return SelectVLD(Node, 4, false, Opcodes);
>> +    }
>>      case Intrinsic::arm_neon_vst1: {
>>        static const uint16_t Opcodes[] = { AArch64::ST1_8B,  AArch64::ST1_4H,
>>                                            AArch64::ST1_2S,  AArch64::ST1_1D,
>> @@ -1041,25 +1167,49 @@ SDNode *AArch64DAGToDAGISel::Select(SDNo
>>      }
>>      case Intrinsic::arm_neon_vst2: {
>>        static const uint16_t Opcodes[] = { AArch64::ST2_8B,  AArch64::ST2_4H,
>> -                                          AArch64::ST2_2S,  AArch64::ST1_2V_1D,
>> +                                          AArch64::ST2_2S,  AArch64::ST1x2_1D,
>>                                            AArch64::ST2_16B, AArch64::ST2_8H,
>>                                            AArch64::ST2_4S,  AArch64::ST2_2D };
>>        return SelectVST(Node, 2, false, Opcodes);
>>      }
>>      case Intrinsic::arm_neon_vst3: {
>>        static const uint16_t Opcodes[] = { AArch64::ST3_8B,  AArch64::ST3_4H,
>> -                                          AArch64::ST3_2S,  AArch64::ST1_3V_1D,
>> +                                          AArch64::ST3_2S,  AArch64::ST1x3_1D,
>>                                            AArch64::ST3_16B, AArch64::ST3_8H,
>>                                            AArch64::ST3_4S,  AArch64::ST3_2D };
>>        return SelectVST(Node, 3, false, Opcodes);
>>      }
>>      case Intrinsic::arm_neon_vst4: {
>>        static const uint16_t Opcodes[] = { AArch64::ST4_8B,  AArch64::ST4_4H,
>> -                                          AArch64::ST4_2S,  AArch64::ST1_4V_1D,
>> +                                          AArch64::ST4_2S,  AArch64::ST1x4_1D,
>>                                            AArch64::ST4_16B, AArch64::ST4_8H,
>>                                            AArch64::ST4_4S,  AArch64::ST4_2D };
>>        return SelectVST(Node, 4, false, Opcodes);
>>      }
>> +    case Intrinsic::aarch64_neon_vst1x2: {
>> +      static const uint16_t Opcodes[] = {
>> +        AArch64::ST1x2_8B, AArch64::ST1x2_4H,  AArch64::ST1x2_2S,
>> +        AArch64::ST1x2_1D, AArch64::ST1x2_16B, AArch64::ST1x2_8H,
>> +        AArch64::ST1x2_4S, AArch64::ST1x2_2D
>> +      };
>> +      return SelectVST(Node, 2, false, Opcodes);
>> +    }
>> +    case Intrinsic::aarch64_neon_vst1x3: {
>> +      static const uint16_t Opcodes[] = {
>> +        AArch64::ST1x3_8B, AArch64::ST1x3_4H,  AArch64::ST1x3_2S,
>> +        AArch64::ST1x3_1D, AArch64::ST1x3_16B, AArch64::ST1x3_8H,
>> +        AArch64::ST1x3_4S, AArch64::ST1x3_2D
>> +      };
>> +      return SelectVST(Node, 3, false, Opcodes);
>> +    }
>> +    case Intrinsic::aarch64_neon_vst1x4: {
>> +      static const uint16_t Opcodes[] = {
>> +        AArch64::ST1x4_8B, AArch64::ST1x4_4H,  AArch64::ST1x4_2S,
>> +        AArch64::ST1x4_1D, AArch64::ST1x4_16B, AArch64::ST1x4_8H,
>> +        AArch64::ST1x4_4S, AArch64::ST1x4_2D
>> +      };
>> +      return SelectVST(Node, 4, false, Opcodes);
>> +    }
>>      }
>>      break;
>>    }
>>
>> Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=194990&r1=194989&r2=194990&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
>> +++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Mon Nov 18 00:31:53 2013
>> @@ -937,6 +937,18 @@ const char *AArch64TargetLowering::getTa
>>      return "AArch64ISD::NEON_ST3_UPD";
>>    case AArch64ISD::NEON_ST4_UPD:
>>      return "AArch64ISD::NEON_ST4_UPD";
>> +  case AArch64ISD::NEON_LD1x2_UPD:
>> +    return "AArch64ISD::NEON_LD1x2_UPD";
>> +  case AArch64ISD::NEON_LD1x3_UPD:
>> +    return "AArch64ISD::NEON_LD1x3_UPD";
>> +  case AArch64ISD::NEON_LD1x4_UPD:
>> +    return "AArch64ISD::NEON_LD1x4_UPD";
>> +  case AArch64ISD::NEON_ST1x2_UPD:
>> +    return "AArch64ISD::NEON_ST1x2_UPD";
>> +  case AArch64ISD::NEON_ST1x3_UPD:
>> +    return "AArch64ISD::NEON_ST1x3_UPD";
>> +  case AArch64ISD::NEON_ST1x4_UPD:
>> +    return "AArch64ISD::NEON_ST1x4_UPD";
>>    case AArch64ISD::NEON_VEXTRACT:
>>      return "AArch64ISD::NEON_VEXTRACT";
>>    default:
>> @@ -3545,6 +3557,18 @@ static SDValue CombineBaseUpdate(SDNode
>>        NumVecs = 3; isLoad = false; break;
>>      case Intrinsic::arm_neon_vst4:     NewOpc = AArch64ISD::NEON_ST4_UPD;
>>        NumVecs = 4; isLoad = false; break;
>> +    case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD;
>> +      NumVecs = 2; break;
>> +    case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD;
>> +      NumVecs = 3; break;
>> +    case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD;
>> +      NumVecs = 4; break;
>> +    case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD;
>> +      NumVecs = 2; isLoad = false; break;
>> +    case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD;
>> +      NumVecs = 3; isLoad = false; break;
>> +    case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD;
>> +      NumVecs = 4; isLoad = false; break;
>>      }
>>
>>      // Find the size of memory referenced by the load/store.
>> @@ -3624,6 +3648,12 @@ AArch64TargetLowering::PerformDAGCombine
>>      case Intrinsic::arm_neon_vst2:
>>      case Intrinsic::arm_neon_vst3:
>>      case Intrinsic::arm_neon_vst4:
>> +    case Intrinsic::aarch64_neon_vld1x2:
>> +    case Intrinsic::aarch64_neon_vld1x3:
>> +    case Intrinsic::aarch64_neon_vld1x4:
>> +    case Intrinsic::aarch64_neon_vst1x2:
>> +    case Intrinsic::aarch64_neon_vst1x3:
>> +    case Intrinsic::aarch64_neon_vst1x4:
>>        return CombineBaseUpdate(N, DCI);
>>      default:
>>        break;
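To make the new CombineBaseUpdate cases concrete, this is the shape of IR the
combine targets (an illustrative sketch, not from the patch): when the base
pointer is advanced by exactly the number of bytes transferred, the intrinsic
and the increment fold into one post-indexed access via the new
NEON_LD1x2_UPD/NEON_ST1x2_UPD nodes.

  declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8*, i32)

  define { <8 x i8>, <8 x i8> } @ld1x2_postinc(i8** %pp) {
    %p = load i8** %pp
    %v = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8* %p, i32 1)
    ; the base advances by the 16 bytes just loaded, so selection can use the
    ; writeback form: ld1 {v0.8b, v1.8b}, [x0], #16
    %p.next = getelementptr i8* %p, i32 16
    store i8* %p.next, i8** %pp
    ret { <8 x i8>, <8 x i8> } %v
  }
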
>> @@ -4170,7 +4200,10 @@ bool AArch64TargetLowering::getTgtMemInt
>>    case Intrinsic::arm_neon_vld1:
>>    case Intrinsic::arm_neon_vld2:
>>    case Intrinsic::arm_neon_vld3:
>> -  case Intrinsic::arm_neon_vld4: {
>> +  case Intrinsic::arm_neon_vld4:
>> +  case Intrinsic::aarch64_neon_vld1x2:
>> +  case Intrinsic::aarch64_neon_vld1x3:
>> +  case Intrinsic::aarch64_neon_vld1x4: {
>>      Info.opc = ISD::INTRINSIC_W_CHAIN;
>>      // Conservatively set memVT to the entire set of vectors loaded.
>>      uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
>> @@ -4187,7 +4220,10 @@ bool AArch64TargetLowering::getTgtMemInt
>>    case Intrinsic::arm_neon_vst1:
>>    case Intrinsic::arm_neon_vst2:
>>    case Intrinsic::arm_neon_vst3:
>> -  case Intrinsic::arm_neon_vst4: {
>> +  case Intrinsic::arm_neon_vst4:
>> +  case Intrinsic::aarch64_neon_vst1x2:
>> +  case Intrinsic::aarch64_neon_vst1x3:
>> +  case Intrinsic::aarch64_neon_vst1x4: {
>>      Info.opc = ISD::INTRINSIC_VOID;
>>      // Conservatively set memVT to the entire set of vectors stored.
>>      unsigned NumElts = 0;
>>
>> Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h?rev=194990&r1=194989&r2=194990&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h (original)
>> +++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h Mon Nov 18 00:31:53 2013
>> @@ -157,12 +157,18 @@ namespace AArch64ISD {
>>      NEON_LD2_UPD,
>>      NEON_LD3_UPD,
>>      NEON_LD4_UPD,
>> +    NEON_LD1x2_UPD,
>> +    NEON_LD1x3_UPD,
>> +    NEON_LD1x4_UPD,
>>
>>      // NEON stores with post-increment base updates:
>>      NEON_ST1_UPD,
>>      NEON_ST2_UPD,
>>      NEON_ST3_UPD,
>> -    NEON_ST4_UPD
>> +    NEON_ST4_UPD,
>> +    NEON_ST1x2_UPD,
>> +    NEON_ST1x3_UPD,
>> +    NEON_ST1x4_UPD
>>    };
>>  }
>>
>>
>> Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td?rev=194990&r1=194989&r2=194990&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td (original)
>> +++ llvm/trunk/lib/Target/AArch64/AArch64InstrNEON.td Mon Nov 18 00:31:53 2013
>> @@ -3364,14 +3364,14 @@ defm LD3 : LDVList_BHSD<0b0100, "VTriple
>>  defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">;
>>
>>  // Load multiple 1-element structure to N consecutive registers (N = 2,3,4)
>> -defm LD1_2V : LDVList_BHSD<0b1010, "VPair", "ld1">;
>> -def LD1_2V_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
>> +defm LD1x2 : LDVList_BHSD<0b1010, "VPair", "ld1">;
>> +def LD1x2_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
>>
>> -defm LD1_3V : LDVList_BHSD<0b0110, "VTriple", "ld1">;
>> -def LD1_3V_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
>> +defm LD1x3 : LDVList_BHSD<0b0110, "VTriple", "ld1">;
>> +def LD1x3_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
>>
>> -defm LD1_4V : LDVList_BHSD<0b0010, "VQuad", "ld1">;
>> -def LD1_4V_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
>> +defm LD1x4 : LDVList_BHSD<0b0010, "VQuad", "ld1">;
>> +def LD1x4_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
>>
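The separate _1D definitions are needed because ld2/ld3/ld4 have no .1d
arrangement, so a structured load of <1 x i64> vectors can only be expressed
as ld1 with a register list. That is also why the vld2/vld3/vld4 selection
tables above put LD1x2_1D/LD1x3_1D/LD1x4_1D in the 1D slot. Roughly (this is
the mapping the test_vld2_s64 test in the test diff below checks):

  declare { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8*, i32)

  define { <1 x i64>, <1 x i64> } @ld2_of_1d(i8* %p) {
    ; there is no "ld2 {v0.1d, v1.1d}", so this selects LD1x2_1D:
    ;   ld1 {v0.1d, v1.1d}, [x0]
    %v = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8* %p, i32 8)
    ret { <1 x i64>, <1 x i64> } %v
  }
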
>>  class NeonI_STVList<bit q, bits<4> opcode, bits<2> size,
>>                      RegisterOperand VecList, string asmop>
>> @@ -3418,14 +3418,14 @@ defm ST3 : STVList_BHSD<0b0100, "VTriple
>>  defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">;
>>
>>  // Store multiple 1-element structures from N consecutive registers (N = 2,3,4)
>> -defm ST1_2V : STVList_BHSD<0b1010, "VPair", "st1">;
>> -def ST1_2V_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
>> +defm ST1x2 : STVList_BHSD<0b1010, "VPair", "st1">;
>> +def ST1x2_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
>>
>> -defm ST1_3V : STVList_BHSD<0b0110, "VTriple", "st1">;
>> -def ST1_3V_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
>> +defm ST1x3 : STVList_BHSD<0b0110, "VTriple", "st1">;
>> +def ST1x3_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
>>
>> -defm ST1_4V : STVList_BHSD<0b0010, "VQuad", "st1">;
>> -def ST1_4V_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
>> +defm ST1x4 : STVList_BHSD<0b0010, "VQuad", "st1">;
>> +def ST1x4_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
>>
>>  // End of vector load/store multiple N-element structure(class SIMD lselem)
>>
>> @@ -3553,19 +3553,19 @@ defm LD4WB : LDWB_VList_BHSD<0b0000, "VQ
>>
>>  // Post-index load multiple 1-element structures from N consecutive registers
>>  // (N = 2,3,4)
>> -defm LD1WB2V : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
>> +defm LD1x2WB : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
>>                                 "ld1">;
>> -defm LD1WB2V_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
>> +defm LD1x2WB_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
>>                                     uimm_exact16, "ld1">;
>>
>> -defm LD1WB3V : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
>> +defm LD1x3WB : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
>>                                "ld1">;
>> -defm LD1WB3V_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
>> +defm LD1x3WB_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
>>                                     uimm_exact24, "ld1">;
>>
>> -defm LD1WB_4V : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
>> +defm LD1x4WB : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
>>                                  "ld1">;
>> -defm LD1WB4V_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
>> +defm LD1x4WB_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
>>                                     uimm_exact32, "ld1">;
>>
>>  multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
>> @@ -3635,19 +3635,19 @@ defm ST4WB : STWB_VList_BHSD<0b0000, "VQ
>>
>>  // Post-index store multiple 1-element structures from N consecutive registers
>>  // (N = 2,3,4)
>> -defm ST1WB2V : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
>> +defm ST1x2WB : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
>>                                 "st1">;
>> -defm ST1WB2V_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
>> +defm ST1x2WB_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
>>                                     uimm_exact16, "st1">;
>>
>> -defm ST1WB3V : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
>> +defm ST1x3WB : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
>>                                "st1">;
>> -defm ST1WB3V_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
>> +defm ST1x3WB_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
>>                                     uimm_exact24, "st1">;
>>
>> -defm ST1WB4V : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
>> +defm ST1x4WB : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
>>                                "st1">;
>> -defm ST1WB4V_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
>> +defm ST1x4WB_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
>>                                     uimm_exact32, "st1">;
>>
>>  // End of post-index vector load/store multiple N-element structure
>>
>> Modified: llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll?rev=194990&r1=194989&r2=194990&view=diff
>> ==============================================================================
>> --- llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll (original)
>> +++ llvm/trunk/test/CodeGen/AArch64/neon-simd-ldst-multi-elem.ll Mon Nov 18 00:31:53 2013
>> @@ -39,14 +39,14 @@
>>
>>
>>  define <16 x i8> @test_vld1q_s8(i8* readonly %a) {
>> -; CHECK: test_vld1q_s8
>> +; CHECK-LABEL: test_vld1q_s8
>>  ; CHECK: ld1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
>>    %vld1 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %a, i32 1)
>>    ret <16 x i8> %vld1
>>  }
>>
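A note on the blanket CHECK -> CHECK-LABEL change in this file: CHECK-LABEL
makes FileCheck partition the output at each labelled line, so the patterns
that follow can only match within that function. With a bare CHECK, a short
name such as test_vld1q_s8 can also match the label of a longer one like
test_vld1q_s8_x2 (the assumed name of one of the new tests, per the naming in
the log message), letting later patterns match against the wrong function's
code. Roughly:

  ; CHECK-LABEL: test_vld1q_s8
  ; CHECK: ld1 {v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
  ; CHECK-LABEL: test_vld1q_s8_x2
  ; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
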
>>  define <8 x i16> @test_vld1q_s16(i16* readonly %a) {
>> -; CHECK: test_vld1q_s16
>> +; CHECK-LABEL: test_vld1q_s16
>>  ; CHECK: ld1 {v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    %vld1 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %1, i32 2)
>> @@ -54,7 +54,7 @@ define <8 x i16> @test_vld1q_s16(i16* re
>>  }
>>
>>  define <4 x i32> @test_vld1q_s32(i32* readonly %a) {
>> -; CHECK: test_vld1q_s32
>> +; CHECK-LABEL: test_vld1q_s32
>>  ; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    %vld1 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %1, i32 4)
>> @@ -62,7 +62,7 @@ define <4 x i32> @test_vld1q_s32(i32* re
>>  }
>>
>>  define <2 x i64> @test_vld1q_s64(i64* readonly %a) {
>> -; CHECK: test_vld1q_s64
>> +; CHECK-LABEL: test_vld1q_s64
>>  ; CHECK: ld1 {v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    %vld1 = tail call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %1, i32 8)
>> @@ -70,7 +70,7 @@ define <2 x i64> @test_vld1q_s64(i64* re
>>  }
>>
>>  define <4 x float> @test_vld1q_f32(float* readonly %a) {
>> -; CHECK: test_vld1q_f32
>> +; CHECK-LABEL: test_vld1q_f32
>>  ; CHECK: ld1 {v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    %vld1 = tail call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %1, i32 4)
>> @@ -78,7 +78,7 @@ define <4 x float> @test_vld1q_f32(float
>>  }
>>
>>  define <2 x double> @test_vld1q_f64(double* readonly %a) {
>> -; CHECK: test_vld1q_f64
>> +; CHECK-LABEL: test_vld1q_f64
>>  ; CHECK: ld1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    %vld1 = tail call <2 x double> @llvm.arm.neon.vld1.v2f64(i8* %1, i32 8)
>> @@ -86,14 +86,14 @@ define <2 x double> @test_vld1q_f64(doub
>>  }
>>
>>  define <8 x i8> @test_vld1_s8(i8* readonly %a) {
>> -; CHECK: test_vld1_s8
>> +; CHECK-LABEL: test_vld1_s8
>>  ; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
>>    %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1)
>>    ret <8 x i8> %vld1
>>  }
>>
>>  define <4 x i16> @test_vld1_s16(i16* readonly %a) {
>> -; CHECK: test_vld1_s16
>> +; CHECK-LABEL: test_vld1_s16
>>  ; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2)
>> @@ -101,7 +101,7 @@ define <4 x i16> @test_vld1_s16(i16* rea
>>  }
>>
>>  define <2 x i32> @test_vld1_s32(i32* readonly %a) {
>> -; CHECK: test_vld1_s32
>> +; CHECK-LABEL: test_vld1_s32
>>  ; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    %vld1 = tail call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %1, i32 4)
>> @@ -109,7 +109,7 @@ define <2 x i32> @test_vld1_s32(i32* rea
>>  }
>>
>>  define <1 x i64> @test_vld1_s64(i64* readonly %a) {
>> -; CHECK: test_vld1_s64
>> +; CHECK-LABEL: test_vld1_s64
>>  ; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    %vld1 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %1, i32 8)
>> @@ -117,7 +117,7 @@ define <1 x i64> @test_vld1_s64(i64* rea
>>  }
>>
>>  define <2 x float> @test_vld1_f32(float* readonly %a) {
>> -; CHECK: test_vld1_f32
>> +; CHECK-LABEL: test_vld1_f32
>>  ; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    %vld1 = tail call <2 x float> @llvm.arm.neon.vld1.v2f32(i8* %1, i32 4)
>> @@ -125,7 +125,7 @@ define <2 x float> @test_vld1_f32(float*
>>  }
>>
>>  define <1 x double> @test_vld1_f64(double* readonly %a) {
>> -; CHECK: test_vld1_f64
>> +; CHECK-LABEL: test_vld1_f64
>>  ; CHECK: ld1 {v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    %vld1 = tail call <1 x double> @llvm.arm.neon.vld1.v1f64(i8* %1, i32 8)
>> @@ -133,14 +133,14 @@ define <1 x double> @test_vld1_f64(doubl
>>  }
>>
>>  define <8 x i8> @test_vld1_p8(i8* readonly %a) {
>> -; CHECK: test_vld1_p8
>> +; CHECK-LABEL: test_vld1_p8
>>  ; CHECK: ld1 {v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
>>    %vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %a, i32 1)
>>    ret <8 x i8> %vld1
>>  }
>>
>>  define <4 x i16> @test_vld1_p16(i16* readonly %a) {
>> -; CHECK: test_vld1_p16
>> +; CHECK-LABEL: test_vld1_p16
>>  ; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    %vld1 = tail call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %1, i32 2)
>> @@ -148,7 +148,7 @@ define <4 x i16> @test_vld1_p16(i16* rea
>>  }
>>
>>  define %struct.int8x16x2_t @test_vld2q_s8(i8* readonly %a) {
>> -; CHECK: test_vld2q_s8
>> +; CHECK-LABEL: test_vld2q_s8
>>  ; CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
>>    %vld2 = tail call { <16 x i8>, <16 x i8> } @llvm.arm.neon.vld2.v16i8(i8* %a, i32 1)
>>    %vld2.fca.0.extract = extractvalue { <16 x i8>, <16 x i8> } %vld2, 0
>> @@ -159,7 +159,7 @@ define %struct.int8x16x2_t @test_vld2q_s
>>  }
>>
>>  define %struct.int16x8x2_t @test_vld2q_s16(i16* readonly %a) {
>> -; CHECK: test_vld2q_s16
>> +; CHECK-LABEL: test_vld2q_s16
>>  ; CHECK: ld2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    %vld2 = tail call { <8 x i16>, <8 x i16> } @llvm.arm.neon.vld2.v8i16(i8* %1, i32 2)
>> @@ -171,7 +171,7 @@ define %struct.int16x8x2_t @test_vld2q_s
>>  }
>>
>>  define %struct.int32x4x2_t @test_vld2q_s32(i32* readonly %a) {
>> -; CHECK: test_vld2q_s32
>> +; CHECK-LABEL: test_vld2q_s32
>>  ; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    %vld2 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8* %1, i32 4)
>> @@ -183,7 +183,7 @@ define %struct.int32x4x2_t @test_vld2q_s
>>  }
>>
>>  define %struct.int64x2x2_t @test_vld2q_s64(i64* readonly %a) {
>> -; CHECK: test_vld2q_s64
>> +; CHECK-LABEL: test_vld2q_s64
>>  ; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    %vld2 = tail call { <2 x i64>, <2 x i64> } @llvm.arm.neon.vld2.v2i64(i8* %1, i32 8)
>> @@ -195,7 +195,7 @@ define %struct.int64x2x2_t @test_vld2q_s
>>  }
>>
>>  define %struct.float32x4x2_t @test_vld2q_f32(float* readonly %a) {
>> -; CHECK: test_vld2q_f32
>> +; CHECK-LABEL: test_vld2q_f32
>>  ; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    %vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
>> @@ -207,7 +207,7 @@ define %struct.float32x4x2_t @test_vld2q
>>  }
>>
>>  define %struct.float64x2x2_t @test_vld2q_f64(double* readonly %a) {
>> -; CHECK: test_vld2q_f64
>> +; CHECK-LABEL: test_vld2q_f64
>>  ; CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    %vld2 = tail call { <2 x double>, <2 x double> } @llvm.arm.neon.vld2.v2f64(i8* %1, i32 8)
>> @@ -219,7 +219,7 @@ define %struct.float64x2x2_t @test_vld2q
>>  }
>>
>>  define %struct.int8x8x2_t @test_vld2_s8(i8* readonly %a) {
>> -; CHECK: test_vld2_s8
>> +; CHECK-LABEL: test_vld2_s8
>>  ; CHECK: ld2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
>>    %vld2 = tail call { <8 x i8>, <8 x i8> } @llvm.arm.neon.vld2.v8i8(i8* %a, i32 1)
>>    %vld2.fca.0.extract = extractvalue { <8 x i8>, <8 x i8> } %vld2, 0
>> @@ -230,7 +230,7 @@ define %struct.int8x8x2_t @test_vld2_s8(
>>  }
>>
>>  define %struct.int16x4x2_t @test_vld2_s16(i16* readonly %a) {
>> -; CHECK: test_vld2_s16
>> +; CHECK-LABEL: test_vld2_s16
>>  ; CHECK: ld2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    %vld2 = tail call { <4 x i16>, <4 x i16> } @llvm.arm.neon.vld2.v4i16(i8* %1, i32 2)
>> @@ -242,7 +242,7 @@ define %struct.int16x4x2_t @test_vld2_s1
>>  }
>>
>>  define %struct.int32x2x2_t @test_vld2_s32(i32* readonly %a) {
>> -; CHECK: test_vld2_s32
>> +; CHECK-LABEL: test_vld2_s32
>>  ; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    %vld2 = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8* %1, i32 4)
>> @@ -254,7 +254,7 @@ define %struct.int32x2x2_t @test_vld2_s3
>>  }
>>
>>  define %struct.int64x1x2_t @test_vld2_s64(i64* readonly %a) {
>> -; CHECK: test_vld2_s64
>> +; CHECK-LABEL: test_vld2_s64
>>  ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    %vld2 = tail call { <1 x i64>, <1 x i64> } @llvm.arm.neon.vld2.v1i64(i8* %1, i32 8)
>> @@ -266,7 +266,7 @@ define %struct.int64x1x2_t @test_vld2_s6
>>  }
>>
>>  define %struct.float32x2x2_t @test_vld2_f32(float* readonly %a) {
>> -; CHECK: test_vld2_f32
>> +; CHECK-LABEL: test_vld2_f32
>>  ; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    %vld2 = tail call { <2 x float>, <2 x float> } @llvm.arm.neon.vld2.v2f32(i8* %1, i32 4)
>> @@ -278,7 +278,7 @@ define %struct.float32x2x2_t @test_vld2_
>>  }
>>
>>  define %struct.float64x1x2_t @test_vld2_f64(double* readonly %a) {
>> -; CHECK: test_vld2_f64
>> +; CHECK-LABEL: test_vld2_f64
>>  ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    %vld2 = tail call { <1 x double>, <1 x double> } @llvm.arm.neon.vld2.v1f64(i8* %1, i32 8)
>> @@ -290,7 +290,7 @@ define %struct.float64x1x2_t @test_vld2_
>>  }
>>
>>  define %struct.int8x16x3_t @test_vld3q_s8(i8* readonly %a) {
>> -; CHECK: test_vld3q_s8
>> +; CHECK-LABEL: test_vld3q_s8
>>  ; CHECK: ld3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
>>    %vld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %a, i32 1)
>>    %vld3.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %vld3, 0
>> @@ -303,7 +303,7 @@ define %struct.int8x16x3_t @test_vld3q_s
>>  }
>>
>>  define %struct.int16x8x3_t @test_vld3q_s16(i16* readonly %a) {
>> -; CHECK: test_vld3q_s16
>> +; CHECK-LABEL: test_vld3q_s16
>>  ; CHECK: ld3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    %vld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld3.v8i16(i8* %1, i32 2)
>> @@ -317,7 +317,7 @@ define %struct.int16x8x3_t @test_vld3q_s
>>  }
>>
>>  define %struct.int32x4x3_t @test_vld3q_s32(i32* readonly %a) {
>> -; CHECK: test_vld3q_s32
>> +; CHECK-LABEL: test_vld3q_s32
>>  ; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    %vld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld3.v4i32(i8* %1, i32 4)
>> @@ -331,7 +331,7 @@ define %struct.int32x4x3_t @test_vld3q_s
>>  }
>>
>>  define %struct.int64x2x3_t @test_vld3q_s64(i64* readonly %a) {
>> -; CHECK: test_vld3q_s64
>> +; CHECK-LABEL: test_vld3q_s64
>>  ; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    %vld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld3.v2i64(i8* %1, i32 8)
>> @@ -345,7 +345,7 @@ define %struct.int64x2x3_t @test_vld3q_s
>>  }
>>
>>  define %struct.float32x4x3_t @test_vld3q_f32(float* readonly %a) {
>> -; CHECK: test_vld3q_f32
>> +; CHECK-LABEL: test_vld3q_f32
>>  ; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8* %1, i32 4)
>> @@ -359,7 +359,7 @@ define %struct.float32x4x3_t @test_vld3q
>>  }
>>
>>  define %struct.float64x2x3_t @test_vld3q_f64(double* readonly %a) {
>> -; CHECK: test_vld3q_f64
>> +; CHECK-LABEL: test_vld3q_f64
>>  ; CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    %vld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld3.v2f64(i8* %1, i32 8)
>> @@ -373,7 +373,7 @@ define %struct.float64x2x3_t @test_vld3q
>>  }
>>
>>  define %struct.int8x8x3_t @test_vld3_s8(i8* readonly %a) {
>> -; CHECK: test_vld3_s8
>> +; CHECK-LABEL: test_vld3_s8
>>  ; CHECK: ld3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
>>    %vld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld3.v8i8(i8* %a, i32 1)
>>    %vld3.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %vld3, 0
>> @@ -386,7 +386,7 @@ define %struct.int8x8x3_t @test_vld3_s8(
>>  }
>>
>>  define %struct.int16x4x3_t @test_vld3_s16(i16* readonly %a) {
>> -; CHECK: test_vld3_s16
>> +; CHECK-LABEL: test_vld3_s16
>>  ; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    %vld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %1, i32 2)
>> @@ -400,7 +400,7 @@ define %struct.int16x4x3_t @test_vld3_s1
>>  }
>>
>>  define %struct.int32x2x3_t @test_vld3_s32(i32* readonly %a) {
>> -; CHECK: test_vld3_s32
>> +; CHECK-LABEL: test_vld3_s32
>>  ; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    %vld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld3.v2i32(i8* %1, i32 4)
>> @@ -414,7 +414,7 @@ define %struct.int32x2x3_t @test_vld3_s3
>>  }
>>
>>  define %struct.int64x1x3_t @test_vld3_s64(i64* readonly %a) {
>> -; CHECK: test_vld3_s64
>> +; CHECK-LABEL: test_vld3_s64
>>  ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    %vld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld3.v1i64(i8* %1, i32 8)
>> @@ -428,7 +428,7 @@ define %struct.int64x1x3_t @test_vld3_s6
>>  }
>>
>>  define %struct.float32x2x3_t @test_vld3_f32(float* readonly %a) {
>> -; CHECK: test_vld3_f32
>> +; CHECK-LABEL: test_vld3_f32
>>  ; CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    %vld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld3.v2f32(i8* %1, i32 4)
>> @@ -442,7 +442,7 @@ define %struct.float32x2x3_t @test_vld3_
>>  }
>>
>>  define %struct.float64x1x3_t @test_vld3_f64(double* readonly %a) {
>> -; CHECK: test_vld3_f64
>> +; CHECK-LABEL: test_vld3_f64
>>  ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    %vld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld3.v1f64(i8* %1, i32 8)
>> @@ -456,7 +456,7 @@ define %struct.float64x1x3_t @test_vld3_
>>  }
>>
>>  define %struct.int8x16x4_t @test_vld4q_s8(i8* readonly %a) {
>> -; CHECK: test_vld4q_s8
>> +; CHECK-LABEL: test_vld4q_s8
>>  ; CHECK: ld4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}]
>>    %vld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8* %a, i32 1)
>>    %vld4.fca.0.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld4, 0
>> @@ -471,7 +471,7 @@ define %struct.int8x16x4_t @test_vld4q_s
>>  }
>>
>>  define %struct.int16x8x4_t @test_vld4q_s16(i16* readonly %a) {
>> -; CHECK: test_vld4q_s16
>> +; CHECK-LABEL: test_vld4q_s16
>>  ; CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    %vld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.arm.neon.vld4.v8i16(i8* %1, i32 2)
>> @@ -487,7 +487,7 @@ define %struct.int16x8x4_t @test_vld4q_s
>>  }
>>
>>  define %struct.int32x4x4_t @test_vld4q_s32(i32* readonly %a) {
>> -; CHECK: test_vld4q_s32
>> +; CHECK-LABEL: test_vld4q_s32
>>  ; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    %vld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.arm.neon.vld4.v4i32(i8* %1, i32 4)
>> @@ -503,7 +503,7 @@ define %struct.int32x4x4_t @test_vld4q_s
>>  }
>>
>>  define %struct.int64x2x4_t @test_vld4q_s64(i64* readonly %a) {
>> -; CHECK: test_vld4q_s64
>> +; CHECK-LABEL: test_vld4q_s64
>>  ; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    %vld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.arm.neon.vld4.v2i64(i8* %1, i32 8)
>> @@ -519,7 +519,7 @@ define %struct.int64x2x4_t @test_vld4q_s
>>  }
>>
>>  define %struct.float32x4x4_t @test_vld4q_f32(float* readonly %a) {
>> -; CHECK: test_vld4q_f32
>> +; CHECK-LABEL: test_vld4q_f32
>>  ; CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    %vld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld4.v4f32(i8* %1, i32 4)
>> @@ -535,7 +535,7 @@ define %struct.float32x4x4_t @test_vld4q
>>  }
>>
>>  define %struct.float64x2x4_t @test_vld4q_f64(double* readonly %a) {
>> -; CHECK: test_vld4q_f64
>> +; CHECK-LABEL: test_vld4q_f64
>>  ; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    %vld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.arm.neon.vld4.v2f64(i8* %1, i32 8)
>> @@ -551,7 +551,7 @@ define %struct.float64x2x4_t @test_vld4q
>>  }
>>
>>  define %struct.int8x8x4_t @test_vld4_s8(i8* readonly %a) {
>> -; CHECK: test_vld4_s8
>> +; CHECK-LABEL: test_vld4_s8
>>  ; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}]
>>    %vld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.arm.neon.vld4.v8i8(i8* %a, i32 1)
>>    %vld4.fca.0.extract = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %vld4, 0
>> @@ -566,7 +566,7 @@ define %struct.int8x8x4_t @test_vld4_s8(
>>  }
>>
>>  define %struct.int16x4x4_t @test_vld4_s16(i16* readonly %a) {
>> -; CHECK: test_vld4_s16
>> +; CHECK-LABEL: test_vld4_s16
>>  ; CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    %vld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8* %1, i32 2)
>> @@ -582,7 +582,7 @@ define %struct.int16x4x4_t @test_vld4_s1
>>  }
>>
>>  define %struct.int32x2x4_t @test_vld4_s32(i32* readonly %a) {
>> -; CHECK: test_vld4_s32
>> +; CHECK-LABEL: test_vld4_s32
>>  ; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    %vld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.arm.neon.vld4.v2i32(i8* %1, i32 4)
>> @@ -598,7 +598,7 @@ define %struct.int32x2x4_t @test_vld4_s3
>>  }
>>
>>  define %struct.int64x1x4_t @test_vld4_s64(i64* readonly %a) {
>> -; CHECK: test_vld4_s64
>> +; CHECK-LABEL: test_vld4_s64
>>  ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    %vld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.arm.neon.vld4.v1i64(i8* %1, i32 8)
>> @@ -614,7 +614,7 @@ define %struct.int64x1x4_t @test_vld4_s6
>>  }
>>
>>  define %struct.float32x2x4_t @test_vld4_f32(float* readonly %a) {
>> -; CHECK: test_vld4_f32
>> +; CHECK-LABEL: test_vld4_f32
>>  ; CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    %vld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.arm.neon.vld4.v2f32(i8* %1, i32 4)
>> @@ -630,7 +630,7 @@ define %struct.float32x2x4_t @test_vld4_
>>  }
>>
>>  define %struct.float64x1x4_t @test_vld4_f64(double* readonly %a) {
>> -; CHECK: test_vld4_f64
>> +; CHECK-LABEL: test_vld4_f64
>>  ; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    %vld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8* %1, i32 8)
>> @@ -695,14 +695,14 @@ declare { <2 x float>, <2 x float>, <2 x
>>  declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.arm.neon.vld4.v1f64(i8*, i32)
>>
>>  define void @test_vst1q_s8(i8* %a, <16 x i8> %b) {
>> -; CHECK: test_vst1q_s8
>> +; CHECK-LABEL: test_vst1q_s8
>>  ; CHECK: st1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
>>    tail call void @llvm.arm.neon.vst1.v16i8(i8* %a, <16 x i8> %b, i32 1)
>>    ret void
>>  }
>>
>>  define void @test_vst1q_s16(i16* %a, <8 x i16> %b) {
>> -; CHECK: test_vst1q_s16
>> +; CHECK-LABEL: test_vst1q_s16
>>  ; CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v8i16(i8* %1, <8 x i16> %b, i32 2)
>> @@ -710,7 +710,7 @@ define void @test_vst1q_s16(i16* %a, <8
>>  }
>>
>>  define void @test_vst1q_s32(i32* %a, <4 x i32> %b) {
>> -; CHECK: test_vst1q_s32
>> +; CHECK-LABEL: test_vst1q_s32
>>  ; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v4i32(i8* %1, <4 x i32> %b, i32 4)
>> @@ -718,7 +718,7 @@ define void @test_vst1q_s32(i32* %a, <4
>>  }
>>
>>  define void @test_vst1q_s64(i64* %a, <2 x i64> %b) {
>> -; CHECK: test_vst1q_s64
>> +; CHECK-LABEL: test_vst1q_s64
>>  ; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v2i64(i8* %1, <2 x i64> %b, i32 8)
>> @@ -726,7 +726,7 @@ define void @test_vst1q_s64(i64* %a, <2
>>  }
>>
>>  define void @test_vst1q_f32(float* %a, <4 x float> %b) {
>> -; CHECK: test_vst1q_f32
>> +; CHECK-LABEL: test_vst1q_f32
>>  ; CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v4f32(i8* %1, <4 x float> %b, i32 4)
>> @@ -734,7 +734,7 @@ define void @test_vst1q_f32(float* %a, <
>>  }
>>
>>  define void @test_vst1q_f64(double* %a, <2 x double> %b) {
>> -; CHECK: test_vst1q_f64
>> +; CHECK-LABEL: test_vst1q_f64
>>  ; CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v2f64(i8* %1, <2 x double> %b, i32 8)
>> @@ -742,14 +742,14 @@ define void @test_vst1q_f64(double* %a,
>>  }
>>
>>  define void @test_vst1_s8(i8* %a, <8 x i8> %b) {
>> -; CHECK: test_vst1_s8
>> +; CHECK-LABEL: test_vst1_s8
>>  ; CHECK: st1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
>>    tail call void @llvm.arm.neon.vst1.v8i8(i8* %a, <8 x i8> %b, i32 1)
>>    ret void
>>  }
>>
>>  define void @test_vst1_s16(i16* %a, <4 x i16> %b) {
>> -; CHECK: test_vst1_s16
>> +; CHECK-LABEL: test_vst1_s16
>>  ; CHECK: st1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast i16* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v4i16(i8* %1, <4 x i16> %b, i32 2)
>> @@ -757,7 +757,7 @@ define void @test_vst1_s16(i16* %a, <4 x
>>  }
>>
>>  define void @test_vst1_s32(i32* %a, <2 x i32> %b) {
>> -; CHECK: test_vst1_s32
>> +; CHECK-LABEL: test_vst1_s32
>>  ; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast i32* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v2i32(i8* %1, <2 x i32> %b, i32 4)
>> @@ -765,7 +765,7 @@ define void @test_vst1_s32(i32* %a, <2 x
>>  }
>>
>>  define void @test_vst1_s64(i64* %a, <1 x i64> %b) {
>> -; CHECK: test_vst1_s64
>> +; CHECK-LABEL: test_vst1_s64
>>  ; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast i64* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v1i64(i8* %1, <1 x i64> %b, i32 8)
>> @@ -773,7 +773,7 @@ define void @test_vst1_s64(i64* %a, <1 x
>>  }
>>
>>  define void @test_vst1_f32(float* %a, <2 x float> %b) {
>> -; CHECK: test_vst1_f32
>> +; CHECK-LABEL: test_vst1_f32
>>  ; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast float* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v2f32(i8* %1, <2 x float> %b, i32 4)
>> @@ -781,7 +781,7 @@ define void @test_vst1_f32(float* %a, <2
>>  }
>>
>>  define void @test_vst1_f64(double* %a, <1 x double> %b) {
>> -; CHECK: test_vst1_f64
>> +; CHECK-LABEL: test_vst1_f64
>>  ; CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>>    %1 = bitcast double* %a to i8*
>>    tail call void @llvm.arm.neon.vst1.v1f64(i8* %1, <1 x double> %b, i32 8)
>> @@ -789,7 +789,7 @@ define void @test_vst1_f64(double* %a, <
>>  }
>>
>>  define void @test_vst2q_s8(i8* %a, [2 x <16 x i8>] %b.coerce) {
>> -; CHECK: test_vst2q_s8
>> +; CHECK-LABEL: test_vst2q_s8
>>  ; CHECK: st2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <16 x i8>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <16 x i8>] %b.coerce, 1
>> @@ -798,7 +798,7 @@ define void @test_vst2q_s8(i8* %a, [2 x
>>  }
>>
>>  define void @test_vst2q_s16(i16* %a, [2 x <8 x i16>] %b.coerce) {
>> -; CHECK: test_vst2q_s16
>> +; CHECK-LABEL: test_vst2q_s16
>>  ; CHECK: st2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <8 x i16>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <8 x i16>] %b.coerce, 1
>> @@ -808,7 +808,7 @@ define void @test_vst2q_s16(i16* %a, [2
>>  }
>>
>>  define void @test_vst2q_s32(i32* %a, [2 x <4 x i32>] %b.coerce) {
>> -; CHECK: test_vst2q_s32
>> +; CHECK-LABEL: test_vst2q_s32
>>  ; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <4 x i32>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <4 x i32>] %b.coerce, 1
>> @@ -818,7 +818,7 @@ define void @test_vst2q_s32(i32* %a, [2
>>  }
>>
>>  define void @test_vst2q_s64(i64* %a, [2 x <2 x i64>] %b.coerce) {
>> -; CHECK: test_vst2q_s64
>> +; CHECK-LABEL: test_vst2q_s64
>>  ; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <2 x i64>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <2 x i64>] %b.coerce, 1
>> @@ -828,7 +828,7 @@ define void @test_vst2q_s64(i64* %a, [2
>>  }
>>
>>  define void @test_vst2q_f32(float* %a, [2 x <4 x float>] %b.coerce) {
>> -; CHECK: test_vst2q_f32
>> +; CHECK-LABEL: test_vst2q_f32
>>  ; CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <4 x float>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <4 x float>] %b.coerce, 1
>> @@ -838,7 +838,7 @@ define void @test_vst2q_f32(float* %a, [
>>  }
>>
>>  define void @test_vst2q_f64(double* %a, [2 x <2 x double>] %b.coerce) {
>> -; CHECK: test_vst2q_f64
>> +; CHECK-LABEL: test_vst2q_f64
>>  ; CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <2 x double>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <2 x double>] %b.coerce, 1
>> @@ -848,7 +848,7 @@ define void @test_vst2q_f64(double* %a,
>>  }
>>
>>  define void @test_vst2_s8(i8* %a, [2 x <8 x i8>] %b.coerce) {
>> -; CHECK: test_vst2_s8
>> +; CHECK-LABEL: test_vst2_s8
>>  ; CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <8 x i8>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <8 x i8>] %b.coerce, 1
>> @@ -857,7 +857,7 @@ define void @test_vst2_s8(i8* %a, [2 x <
>>  }
>>
>>  define void @test_vst2_s16(i16* %a, [2 x <4 x i16>] %b.coerce) {
>> -; CHECK: test_vst2_s16
>> +; CHECK-LABEL: test_vst2_s16
>>  ; CHECK: st2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <4 x i16>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <4 x i16>] %b.coerce, 1
>> @@ -867,7 +867,7 @@ define void @test_vst2_s16(i16* %a, [2 x
>>  }
>>
>>  define void @test_vst2_s32(i32* %a, [2 x <2 x i32>] %b.coerce) {
>> -; CHECK: test_vst2_s32
>> +; CHECK-LABEL: test_vst2_s32
>>  ; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <2 x i32>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <2 x i32>] %b.coerce, 1
>> @@ -877,7 +877,7 @@ define void @test_vst2_s32(i32* %a, [2 x
>>  }
>>
>>  define void @test_vst2_s64(i64* %a, [2 x <1 x i64>] %b.coerce) {
>> -; CHECK: test_vst2_s64
>> +; CHECK-LABEL: test_vst2_s64
>>  ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <1 x i64>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <1 x i64>] %b.coerce, 1
>> @@ -887,7 +887,7 @@ define void @test_vst2_s64(i64* %a, [2 x
>>  }
>>
>>  define void @test_vst2_f32(float* %a, [2 x <2 x float>] %b.coerce) {
>> -; CHECK: test_vst2_f32
>> +; CHECK-LABEL: test_vst2_f32
>>  ; CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <2 x float>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <2 x float>] %b.coerce, 1
>> @@ -897,7 +897,7 @@ define void @test_vst2_f32(float* %a, [2
>>  }
>>
>>  define void @test_vst2_f64(double* %a, [2 x <1 x double>] %b.coerce) {
>> -; CHECK: test_vst2_f64
>> +; CHECK-LABEL: test_vst2_f64
>>  ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [2 x <1 x double>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [2 x <1 x double>] %b.coerce, 1
>> @@ -907,7 +907,7 @@ define void @test_vst2_f64(double* %a, [
>>  }
>>
>>  define void @test_vst3q_s8(i8* %a, [3 x <16 x i8>] %b.coerce) {
>> -; CHECK: test_vst3q_s8
>> +; CHECK-LABEL: test_vst3q_s8
>>  ; CHECK: st3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <16 x i8>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <16 x i8>] %b.coerce, 1
>> @@ -917,7 +917,7 @@ define void @test_vst3q_s8(i8* %a, [3 x
>>  }
>>
>>  define void @test_vst3q_s16(i16* %a, [3 x <8 x i16>] %b.coerce) {
>> -; CHECK: test_vst3q_s16
>> +; CHECK-LABEL: test_vst3q_s16
>>  ; CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <8 x i16>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <8 x i16>] %b.coerce, 1
>> @@ -928,7 +928,7 @@ define void @test_vst3q_s16(i16* %a, [3
>>  }
>>
>>  define void @test_vst3q_s32(i32* %a, [3 x <4 x i32>] %b.coerce) {
>> -; CHECK: test_vst3q_s32
>> +; CHECK-LABEL: test_vst3q_s32
>>  ; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <4 x i32>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <4 x i32>] %b.coerce, 1
>> @@ -939,7 +939,7 @@ define void @test_vst3q_s32(i32* %a, [3
>>  }
>>
>>  define void @test_vst3q_s64(i64* %a, [3 x <2 x i64>] %b.coerce) {
>> -; CHECK: test_vst3q_s64
>> +; CHECK-LABEL: test_vst3q_s64
>>  ; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <2 x i64>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <2 x i64>] %b.coerce, 1
>> @@ -950,7 +950,7 @@ define void @test_vst3q_s64(i64* %a, [3
>>  }
>>
>>  define void @test_vst3q_f32(float* %a, [3 x <4 x float>] %b.coerce) {
>> -; CHECK: test_vst3q_f32
>> +; CHECK-LABEL: test_vst3q_f32
>>  ; CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <4 x float>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <4 x float>] %b.coerce, 1
>> @@ -961,7 +961,7 @@ define void @test_vst3q_f32(float* %a, [
>>  }
>>
>>  define void @test_vst3q_f64(double* %a, [3 x <2 x double>] %b.coerce) {
>> -; CHECK: test_vst3q_f64
>> +; CHECK-LABEL: test_vst3q_f64
>>  ; CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <2 x double>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <2 x double>] %b.coerce, 1
>> @@ -972,7 +972,7 @@ define void @test_vst3q_f64(double* %a,
>>  }
>>
>>  define void @test_vst3_s8(i8* %a, [3 x <8 x i8>] %b.coerce) {
>> -; CHECK: test_vst3_s8
>> +; CHECK-LABEL: test_vst3_s8
>>  ; CHECK: st3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <8 x i8>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <8 x i8>] %b.coerce, 1
>> @@ -982,7 +982,7 @@ define void @test_vst3_s8(i8* %a, [3 x <
>>  }
>>
>>  define void @test_vst3_s16(i16* %a, [3 x <4 x i16>] %b.coerce) {
>> -; CHECK: test_vst3_s16
>> +; CHECK-LABEL: test_vst3_s16
>>  ; CHECK: st3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <4 x i16>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <4 x i16>] %b.coerce, 1
>> @@ -993,7 +993,7 @@ define void @test_vst3_s16(i16* %a, [3 x
>>  }
>>
>>  define void @test_vst3_s32(i32* %a, [3 x <2 x i32>] %b.coerce) {
>> -; CHECK: test_vst3_s32
>> +; CHECK-LABEL: test_vst3_s32
>>  ; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <2 x i32>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <2 x i32>] %b.coerce, 1
>> @@ -1004,7 +1004,7 @@ define void @test_vst3_s32(i32* %a, [3 x
>>  }
>>
>>  define void @test_vst3_s64(i64* %a, [3 x <1 x i64>] %b.coerce) {
>> -; CHECK: test_vst3_s64
>> +; CHECK-LABEL: test_vst3_s64
>>  ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <1 x i64>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <1 x i64>] %b.coerce, 1
>> @@ -1015,7 +1015,7 @@ define void @test_vst3_s64(i64* %a, [3 x
>>  }
>>
>>  define void @test_vst3_f32(float* %a, [3 x <2 x float>] %b.coerce) {
>> -; CHECK: test_vst3_f32
>> +; CHECK-LABEL: test_vst3_f32
>>  ; CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <2 x float>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <2 x float>] %b.coerce, 1
>> @@ -1026,7 +1026,7 @@ define void @test_vst3_f32(float* %a, [3
>>  }
>>
>>  define void @test_vst3_f64(double* %a, [3 x <1 x double>] %b.coerce) {
>> -; CHECK: test_vst3_f64
>> +; CHECK-LABEL: test_vst3_f64
>>  ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [3 x <1 x double>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [3 x <1 x double>] %b.coerce, 1
>> @@ -1037,7 +1037,7 @@ define void @test_vst3_f64(double* %a, [
>>  }
>>
>>  define void @test_vst4q_s8(i8* %a, [4 x <16 x i8>] %b.coerce) {
>> -; CHECK: test_vst4q_s8
>> +; CHECK-LABEL: test_vst4q_s8
>>  ; CHECK: st4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <16 x i8>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <16 x i8>] %b.coerce, 1
>> @@ -1048,7 +1048,7 @@ define void @test_vst4q_s8(i8* %a, [4 x
>>  }
>>
>>  define void @test_vst4q_s16(i16* %a, [4 x <8 x i16>] %b.coerce) {
>> -; CHECK: test_vst4q_s16
>> +; CHECK-LABEL: test_vst4q_s16
>>  ; CHECK: st4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <8 x i16>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <8 x i16>] %b.coerce, 1
>> @@ -1060,7 +1060,7 @@ define void @test_vst4q_s16(i16* %a, [4
>>  }
>>
>>  define void @test_vst4q_s32(i32* %a, [4 x <4 x i32>] %b.coerce) {
>> -; CHECK: test_vst4q_s32
>> +; CHECK-LABEL: test_vst4q_s32
>>  ; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <4 x i32>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <4 x i32>] %b.coerce, 1
>> @@ -1072,7 +1072,7 @@ define void @test_vst4q_s32(i32* %a, [4
>>  }
>>
>>  define void @test_vst4q_s64(i64* %a, [4 x <2 x i64>] %b.coerce) {
>> -; CHECK: test_vst4q_s64
>> +; CHECK-LABEL: test_vst4q_s64
>>  ; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <2 x i64>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <2 x i64>] %b.coerce, 1
>> @@ -1084,7 +1084,7 @@ define void @test_vst4q_s64(i64* %a, [4
>>  }
>>
>>  define void @test_vst4q_f32(float* %a, [4 x <4 x float>] %b.coerce) {
>> -; CHECK: test_vst4q_f32
>> +; CHECK-LABEL: test_vst4q_f32
>>  ; CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <4 x float>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <4 x float>] %b.coerce, 1
>> @@ -1096,7 +1096,7 @@ define void @test_vst4q_f32(float* %a, [
>>  }
>>
>>  define void @test_vst4q_f64(double* %a, [4 x <2 x double>] %b.coerce) {
>> -; CHECK: test_vst4q_f64
>> +; CHECK-LABEL: test_vst4q_f64
>>  ; CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <2 x double>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <2 x double>] %b.coerce, 1
>> @@ -1108,7 +1108,7 @@ define void @test_vst4q_f64(double* %a,
>>  }
>>
>>  define void @test_vst4_s8(i8* %a, [4 x <8 x i8>] %b.coerce) {
>> -; CHECK: test_vst4_s8
>> +; CHECK-LABEL: test_vst4_s8
>>  ; CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <8 x i8>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <8 x i8>] %b.coerce, 1
>> @@ -1119,7 +1119,7 @@ define void @test_vst4_s8(i8* %a, [4 x <
>>  }
>>
>>  define void @test_vst4_s16(i16* %a, [4 x <4 x i16>] %b.coerce) {
>> -; CHECK: test_vst4_s16
>> +; CHECK-LABEL: test_vst4_s16
>>  ; CHECK: st4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <4 x i16>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <4 x i16>] %b.coerce, 1
>> @@ -1131,7 +1131,7 @@ define void @test_vst4_s16(i16* %a, [4 x
>>  }
>>
>>  define void @test_vst4_s32(i32* %a, [4 x <2 x i32>] %b.coerce) {
>> -; CHECK: test_vst4_s32
>> +; CHECK-LABEL: test_vst4_s32
>>  ; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <2 x i32>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <2 x i32>] %b.coerce, 1
>> @@ -1143,7 +1143,7 @@ define void @test_vst4_s32(i32* %a, [4 x
>>  }
>>
>>  define void @test_vst4_s64(i64* %a, [4 x <1 x i64>] %b.coerce) {
>> -; CHECK: test_vst4_s64
>> +; CHECK-LABEL: test_vst4_s64
>>  ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <1 x i64>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <1 x i64>] %b.coerce, 1
>> @@ -1155,7 +1155,7 @@ define void @test_vst4_s64(i64* %a, [4 x
>>  }
>>
>>  define void @test_vst4_f32(float* %a, [4 x <2 x float>] %b.coerce) {
>> -; CHECK: test_vst4_f32
>> +; CHECK-LABEL: test_vst4_f32
>>  ; CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <2 x float>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <2 x float>] %b.coerce, 1
>> @@ -1167,7 +1167,7 @@ define void @test_vst4_f32(float* %a, [4
>>  }
>>
>>  define void @test_vst4_f64(double* %a, [4 x <1 x double>] %b.coerce) {
>> -; CHECK: test_vst4_f64
>> +; CHECK-LABEL: test_vst4_f64
>>  ; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>>    %b.coerce.fca.0.extract = extractvalue [4 x <1 x double>] %b.coerce, 0
>>    %b.coerce.fca.1.extract = extractvalue [4 x <1 x double>] %b.coerce, 1
>> @@ -1225,4 +1225,1018 @@ declare void @llvm.arm.neon.vst4.v4i16(i
>>  declare void @llvm.arm.neon.vst4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32)
>>  declare void @llvm.arm.neon.vst4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32)
>>  declare void @llvm.arm.neon.vst4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32)
>> -declare void @llvm.arm.neon.vst4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
>> \ No newline at end of file
>> +declare void @llvm.arm.neon.vst4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
>> +
>> +define %struct.int8x16x2_t @test_vld1q_s8_x2(i8* %a)  {
>> +; CHECK-LABEL: test_vld1q_s8_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
>> +  %1 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
>> +  %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
>> +  %3 = extractvalue { <16 x i8>, <16 x i8> } %1, 1
>> +  %4 = insertvalue %struct.int8x16x2_t undef, <16 x i8> %2, 0, 0
>> +  %5 = insertvalue %struct.int8x16x2_t %4, <16 x i8> %3, 0, 1
>> +  ret %struct.int8x16x2_t %5
>> +}
>> +
>> +define %struct.int16x8x2_t @test_vld1q_s16_x2(i16* %a)  {
>> +; CHECK-LABEL: test_vld1q_s16_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i16* %a to i8*
>> +  %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2)
>> +  %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0
>> +  %4 = extractvalue { <8 x i16>, <8 x i16> } %2, 1
>> +  %5 = insertvalue %struct.int16x8x2_t undef, <8 x i16> %3, 0, 0
>> +  %6 = insertvalue %struct.int16x8x2_t %5, <8 x i16> %4, 0, 1
>> +  ret %struct.int16x8x2_t %6
>> +}
>> +
>> +define %struct.int32x4x2_t @test_vld1q_s32_x2(i32* %a)  {
>> +; CHECK-LABEL: test_vld1q_s32_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i32* %a to i8*
>> +  %2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8* %1, i32 4)
>> +  %3 = extractvalue { <4 x i32>, <4 x i32> } %2, 0
>> +  %4 = extractvalue { <4 x i32>, <4 x i32> } %2, 1
>> +  %5 = insertvalue %struct.int32x4x2_t undef, <4 x i32> %3, 0, 0
>> +  %6 = insertvalue %struct.int32x4x2_t %5, <4 x i32> %4, 0, 1
>> +  ret %struct.int32x4x2_t %6
>> +}
>> +
>> +define %struct.int64x2x2_t @test_vld1q_s64_x2(i64* %a)  {
>> +; CHECK-LABEL: test_vld1q_s64_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i64* %a to i8*
>> +  %2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x2.v2i64(i8* %1, i32 8)
>> +  %3 = extractvalue { <2 x i64>, <2 x i64> } %2, 0
>> +  %4 = extractvalue { <2 x i64>, <2 x i64> } %2, 1
>> +  %5 = insertvalue %struct.int64x2x2_t undef, <2 x i64> %3, 0, 0
>> +  %6 = insertvalue %struct.int64x2x2_t %5, <2 x i64> %4, 0, 1
>> +  ret %struct.int64x2x2_t %6
>> +}
>> +
>> +define %struct.float32x4x2_t @test_vld1q_f32_x2(float* %a)  {
>> +; CHECK-LABEL: test_vld1q_f32_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast float* %a to i8*
>> +  %2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x2.v4f32(i8* %1, i32 4)
>> +  %3 = extractvalue { <4 x float>, <4 x float> } %2, 0
>> +  %4 = extractvalue { <4 x float>, <4 x float> } %2, 1
>> +  %5 = insertvalue %struct.float32x4x2_t undef, <4 x float> %3, 0, 0
>> +  %6 = insertvalue %struct.float32x4x2_t %5, <4 x float> %4, 0, 1
>> +  ret %struct.float32x4x2_t %6
>> +}
>> +
>> +
>> +define %struct.float64x2x2_t @test_vld1q_f64_x2(double* %a)  {
>> +; CHECK-LABEL: test_vld1q_f64_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast double* %a to i8*
>> +  %2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x2.v2f64(i8* %1, i32 8)
>> +  %3 = extractvalue { <2 x double>, <2 x double> } %2, 0
>> +  %4 = extractvalue { <2 x double>, <2 x double> } %2, 1
>> +  %5 = insertvalue %struct.float64x2x2_t undef, <2 x double> %3, 0, 0
>> +  %6 = insertvalue %struct.float64x2x2_t %5, <2 x double> %4, 0, 1
>> +  ret %struct.float64x2x2_t %6
>> +}
>> +
>> +define %struct.int8x8x2_t @test_vld1_s8_x2(i8* %a)  {
>> +; CHECK-LABEL: test_vld1_s8_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
>> +  %1 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8* %a, i32 1)
>> +  %2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
>> +  %3 = extractvalue { <8 x i8>, <8 x i8> } %1, 1
>> +  %4 = insertvalue %struct.int8x8x2_t undef, <8 x i8> %2, 0, 0
>> +  %5 = insertvalue %struct.int8x8x2_t %4, <8 x i8> %3, 0, 1
>> +  ret %struct.int8x8x2_t %5
>> +}
>> +
>> +define %struct.int16x4x2_t @test_vld1_s16_x2(i16* %a)  {
>> +; CHECK-LABEL: test_vld1_s16_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i16* %a to i8*
>> +  %2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x2.v4i16(i8* %1, i32 2)
>> +  %3 = extractvalue { <4 x i16>, <4 x i16> } %2, 0
>> +  %4 = extractvalue { <4 x i16>, <4 x i16> } %2, 1
>> +  %5 = insertvalue %struct.int16x4x2_t undef, <4 x i16> %3, 0, 0
>> +  %6 = insertvalue %struct.int16x4x2_t %5, <4 x i16> %4, 0, 1
>> +  ret %struct.int16x4x2_t %6
>> +}
>> +
>> +define %struct.int32x2x2_t @test_vld1_s32_x2(i32* %a)  {
>> +; CHECK-LABEL: test_vld1_s32_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i32* %a to i8*
>> +  %2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x2.v2i32(i8* %1, i32 4)
>> +  %3 = extractvalue { <2 x i32>, <2 x i32> } %2, 0
>> +  %4 = extractvalue { <2 x i32>, <2 x i32> } %2, 1
>> +  %5 = insertvalue %struct.int32x2x2_t undef, <2 x i32> %3, 0, 0
>> +  %6 = insertvalue %struct.int32x2x2_t %5, <2 x i32> %4, 0, 1
>> +  ret %struct.int32x2x2_t %6
>> +}
>> +
>> +define %struct.int64x1x2_t @test_vld1_s64_x2(i64* %a)  {
>> +; CHECK-LABEL: test_vld1_s64_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i64* %a to i8*
>> +  %2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x2.v1i64(i8* %1, i32 8)
>> +  %3 = extractvalue { <1 x i64>, <1 x i64> } %2, 0
>> +  %4 = extractvalue { <1 x i64>, <1 x i64> } %2, 1
>> +  %5 = insertvalue %struct.int64x1x2_t undef, <1 x i64> %3, 0, 0
>> +  %6 = insertvalue %struct.int64x1x2_t %5, <1 x i64> %4, 0, 1
>> +  ret %struct.int64x1x2_t %6
>> +}
>> +
>> +define %struct.float32x2x2_t @test_vld1_f32_x2(float* %a)  {
>> +; CHECK-LABEL: test_vld1_f32_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast float* %a to i8*
>> +  %2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x2.v2f32(i8* %1, i32 4)
>> +  %3 = extractvalue { <2 x float>, <2 x float> } %2, 0
>> +  %4 = extractvalue { <2 x float>, <2 x float> } %2, 1
>> +  %5 = insertvalue %struct.float32x2x2_t undef, <2 x float> %3, 0, 0
>> +  %6 = insertvalue %struct.float32x2x2_t %5, <2 x float> %4, 0, 1
>> +  ret %struct.float32x2x2_t %6
>> +}
>> +
>> +define %struct.float64x1x2_t @test_vld1_f64_x2(double* %a)  {
>> +; CHECK-LABEL: test_vld1_f64_x2
>> +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast double* %a to i8*
>> +  %2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x2.v1f64(i8* %1, i32 8)
>> +  %3 = extractvalue { <1 x double>, <1 x double> } %2, 0
>> +  %4 = extractvalue { <1 x double>, <1 x double> } %2, 1
>> +  %5 = insertvalue %struct.float64x1x2_t undef, <1 x double> %3, 0, 0
>> +  %6 = insertvalue %struct.float64x1x2_t %5, <1 x double> %4, 0, 1
>> +  ret %struct.float64x1x2_t %6
>> +}
>> +
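
For reference, each of the _x2 load tests above corresponds to an ACLE
intrinsic of the same name at the C level. A minimal sketch of the first
one, assuming an AArch64 toolchain whose arm_neon.h already exposes the
_x2 forms:

    #include <arm_neon.h>

    /* Loads 32 contiguous bytes into two q registers; expected to lower
       to a single "ld1 {vN.16b, vM.16b}, [xP]", which is what the CHECK
       lines above verify. */
    int8x16x2_t load_pair(const int8_t *p) {
      return vld1q_s8_x2(p);
    }

Only the element type and vector length vary across the other _x2 cases.
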
>> +define %struct.int8x16x3_t @test_vld1q_s8_x3(i8* %a)  {
>> +; CHECK-LABEL: test_vld1q_s8_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x3.v16i8(i8* %a, i32 1)
>> +  %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 0
>> +  %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 1
>> +  %4 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %1, 2
>> +  %5 = insertvalue %struct.int8x16x3_t undef, <16 x i8> %2, 0, 0
>> +  %6 = insertvalue %struct.int8x16x3_t %5, <16 x i8> %3, 0, 1
>> +  %7 = insertvalue %struct.int8x16x3_t %6, <16 x i8> %4, 0, 2
>> +  ret %struct.int8x16x3_t %7
>> +}
>> +
>> +define %struct.int16x8x3_t @test_vld1q_s16_x3(i16* %a)  {
>> +; CHECK-LABEL: test_vld1q_s16_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i16* %a to i8*
>> +  %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2)
>> +  %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
>> +  %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 1
>> +  %5 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 2
>> +  %6 = insertvalue %struct.int16x8x3_t undef, <8 x i16> %3, 0, 0
>> +  %7 = insertvalue %struct.int16x8x3_t %6, <8 x i16> %4, 0, 1
>> +  %8 = insertvalue %struct.int16x8x3_t %7, <8 x i16> %5, 0, 2
>> +  ret %struct.int16x8x3_t %8
>> +}
>> +
>> +define %struct.int32x4x3_t @test_vld1q_s32_x3(i32* %a)  {
>> +; CHECK-LABEL: test_vld1q_s32_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i32* %a to i8*
>> +  %2 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x3.v4i32(i8* %1, i32 4)
>> +  %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 0
>> +  %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 1
>> +  %5 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32> } %2, 2
>> +  %6 = insertvalue %struct.int32x4x3_t undef, <4 x i32> %3, 0, 0
>> +  %7 = insertvalue %struct.int32x4x3_t %6, <4 x i32> %4, 0, 1
>> +  %8 = insertvalue %struct.int32x4x3_t %7, <4 x i32> %5, 0, 2
>> +  ret %struct.int32x4x3_t %8
>> +}
>> +
>> +define %struct.int64x2x3_t @test_vld1q_s64_x3(i64* %a)  {
>> +; CHECK-LABEL: test_vld1q_s64_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i64* %a to i8*
>> +  %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8)
>> +  %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
>> +  %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 1
>> +  %5 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 2
>> +  %6 = insertvalue %struct.int64x2x3_t undef, <2 x i64> %3, 0, 0
>> +  %7 = insertvalue %struct.int64x2x3_t %6, <2 x i64> %4, 0, 1
>> +  %8 = insertvalue %struct.int64x2x3_t %7, <2 x i64> %5, 0, 2
>> +  ret %struct.int64x2x3_t %8
>> +}
>> +
>> +define %struct.float32x4x3_t @test_vld1q_f32_x3(float* %a)  {
>> +; CHECK-LABEL: test_vld1q_f32_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast float* %a to i8*
>> +  %2 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x3.v4f32(i8* %1, i32 4)
>> +  %3 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 0
>> +  %4 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 1
>> +  %5 = extractvalue { <4 x float>, <4 x float>, <4 x float> } %2, 2
>> +  %6 = insertvalue %struct.float32x4x3_t undef, <4 x float> %3, 0, 0
>> +  %7 = insertvalue %struct.float32x4x3_t %6, <4 x float> %4, 0, 1
>> +  %8 = insertvalue %struct.float32x4x3_t %7, <4 x float> %5, 0, 2
>> +  ret %struct.float32x4x3_t %8
>> +}
>> +
>> +
>> +define %struct.float64x2x3_t @test_vld1q_f64_x3(double* %a)  {
>> +; CHECK-LABEL: test_vld1q_f64_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast double* %a to i8*
>> +  %2 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x3.v2f64(i8* %1, i32 8)
>> +  %3 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 0
>> +  %4 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 1
>> +  %5 = extractvalue { <2 x double>, <2 x double>, <2 x double> } %2, 2
>> +  %6 = insertvalue %struct.float64x2x3_t undef, <2 x double> %3, 0, 0
>> +  %7 = insertvalue %struct.float64x2x3_t %6, <2 x double> %4, 0, 1
>> +  %8 = insertvalue %struct.float64x2x3_t %7, <2 x double> %5, 0, 2
>> +  ret %struct.float64x2x3_t %8
>> +}
>> +
>> +define %struct.int8x8x3_t @test_vld1_s8_x3(i8* %a)  {
>> +; CHECK-LABEL: test_vld1_s8_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x3.v8i8(i8* %a, i32 1)
>> +  %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
>> +  %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
>> +  %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
>> +  %5 = insertvalue %struct.int8x8x3_t undef, <8 x i8> %2, 0, 0
>> +  %6 = insertvalue %struct.int8x8x3_t %5, <8 x i8> %3, 0, 1
>> +  %7 = insertvalue %struct.int8x8x3_t %6, <8 x i8> %4, 0, 2
>> +  ret %struct.int8x8x3_t %7
>> +}
>> +
>> +define %struct.int16x4x3_t @test_vld1_s16_x3(i16* %a)  {
>> +; CHECK-LABEL: test_vld1_s16_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i16* %a to i8*
>> +  %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x3.v4i16(i8* %1, i32 2)
>> +  %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 0
>> +  %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 1
>> +  %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %2, 2
>> +  %6 = insertvalue %struct.int16x4x3_t undef, <4 x i16> %3, 0, 0
>> +  %7 = insertvalue %struct.int16x4x3_t %6, <4 x i16> %4, 0, 1
>> +  %8 = insertvalue %struct.int16x4x3_t %7, <4 x i16> %5, 0, 2
>> +  ret %struct.int16x4x3_t %8
>> +}
>> +
>> +define %struct.int32x2x3_t @test_vld1_s32_x3(i32* %a)  {
>> +; CHECK-LABEL: test_vld1_s32_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i32* %a to i8*
>> +  %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x3.v2i32(i8* %1, i32 4)
>> +  %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 0
>> +  %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 1
>> +  %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32> } %2, 2
>> +  %6 = insertvalue %struct.int32x2x3_t undef, <2 x i32> %3, 0, 0
>> +  %7 = insertvalue %struct.int32x2x3_t %6, <2 x i32> %4, 0, 1
>> +  %8 = insertvalue %struct.int32x2x3_t %7, <2 x i32> %5, 0, 2
>> +  ret %struct.int32x2x3_t %8
>> +}
>> +
>> +define %struct.int64x1x3_t @test_vld1_s64_x3(i64* %a)  {
>> +; CHECK-LABEL: test_vld1_s64_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i64* %a to i8*
>> +  %2 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x3.v1i64(i8* %1, i32 8)
>> +  %3 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 0
>> +  %4 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 1
>> +  %5 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64> } %2, 2
>> +  %6 = insertvalue %struct.int64x1x3_t undef, <1 x i64> %3, 0, 0
>> +  %7 = insertvalue %struct.int64x1x3_t %6, <1 x i64> %4, 0, 1
>> +  %8 = insertvalue %struct.int64x1x3_t %7, <1 x i64> %5, 0, 2
>> +  ret %struct.int64x1x3_t %8
>> +}
>> +
>> +define %struct.float32x2x3_t @test_vld1_f32_x3(float* %a)  {
>> +; CHECK-LABEL: test_vld1_f32_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast float* %a to i8*
>> +  %2 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x3.v2f32(i8* %1, i32 4)
>> +  %3 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 0
>> +  %4 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 1
>> +  %5 = extractvalue { <2 x float>, <2 x float>, <2 x float> } %2, 2
>> +  %6 = insertvalue %struct.float32x2x3_t undef, <2 x float> %3, 0, 0
>> +  %7 = insertvalue %struct.float32x2x3_t %6, <2 x float> %4, 0, 1
>> +  %8 = insertvalue %struct.float32x2x3_t %7, <2 x float> %5, 0, 2
>> +  ret %struct.float32x2x3_t %8
>> +}
>> +
>> +
>> +define %struct.float64x1x3_t @test_vld1_f64_x3(double* %a)  {
>> +; CHECK-LABEL: test_vld1_f64_x3
>> +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = bitcast double* %a to i8*
>> +  %2 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x3.v1f64(i8* %1, i32 8)
>> +  %3 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 0
>> +  %4 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 1
>> +  %5 = extractvalue { <1 x double>, <1 x double>, <1 x double> } %2, 2
>> +  %6 = insertvalue %struct.float64x1x3_t undef, <1 x double> %3, 0, 0
>> +  %7 = insertvalue %struct.float64x1x3_t %6, <1 x double> %4, 0, 1
>> +  %8 = insertvalue %struct.float64x1x3_t %7, <1 x double> %5, 0, 2
>> +  ret %struct.float64x1x3_t %8
>> +}
>> +
>> +define %struct.int8x16x4_t @test_vld1q_s8_x4(i8* %a)  {
>> +; CHECK-LABEL: test_vld1q_s8_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b,
>> +; v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
>> +  %1 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x4.v16i8(i8* %a, i32 1)
>> +  %2 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 0
>> +  %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 1
>> +  %4 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 2
>> +  %5 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %1, 3
>> +  %6 = insertvalue %struct.int8x16x4_t undef, <16 x i8> %2, 0, 0
>> +  %7 = insertvalue %struct.int8x16x4_t %6, <16 x i8> %3, 0, 1
>> +  %8 = insertvalue %struct.int8x16x4_t %7, <16 x i8> %4, 0, 2
>> +  %9 = insertvalue %struct.int8x16x4_t %8, <16 x i8> %5, 0, 3
>> +  ret %struct.int8x16x4_t %9
>> +}
>> +
>> +define %struct.int16x8x4_t @test_vld1q_s16_x4(i16* %a)  {
>> +; CHECK-LABEL: test_vld1q_s16_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h,
>> +; v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i16* %a to i8*
>> +  %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x4.v8i16(i8* %1, i32 2)
>> +  %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
>> +  %4 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 1
>> +  %5 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 2
>> +  %6 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %2, 3
>> +  %7 = insertvalue %struct.int16x8x4_t undef, <8 x i16> %3, 0, 0
>> +  %8 = insertvalue %struct.int16x8x4_t %7, <8 x i16> %4, 0, 1
>> +  %9 = insertvalue %struct.int16x8x4_t %8, <8 x i16> %5, 0, 2
>> +  %10 = insertvalue %struct.int16x8x4_t %9, <8 x i16> %6, 0, 3
>> +  ret %struct.int16x8x4_t %10
>> +}
>> +
>> +define %struct.int32x4x4_t @test_vld1q_s32_x4(i32* %a)  {
>> +; CHECK-LABEL: test_vld1q_s32_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s,
>> +; v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i32* %a to i8*
>> +  %2 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x4.v4i32(i8* %1, i32 4)
>> +  %3 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 0
>> +  %4 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 1
>> +  %5 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 2
>> +  %6 = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %2, 3
>> +  %7 = insertvalue %struct.int32x4x4_t undef, <4 x i32> %3, 0, 0
>> +  %8 = insertvalue %struct.int32x4x4_t %7, <4 x i32> %4, 0, 1
>> +  %9 = insertvalue %struct.int32x4x4_t %8, <4 x i32> %5, 0, 2
>> +  %10 = insertvalue %struct.int32x4x4_t %9, <4 x i32> %6, 0, 3
>> +  ret %struct.int32x4x4_t %10
>> +}
>> +
>> +define %struct.int64x2x4_t @test_vld1q_s64_x4(i64* %a)  {
>> +; CHECK-LABEL: test_vld1q_s64_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d,
>> +; v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i64* %a to i8*
>> +  %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x4.v2i64(i8* %1, i32 8)
>> +  %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
>> +  %4 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 1
>> +  %5 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 2
>> +  %6 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %2, 3
>> +  %7 = insertvalue %struct.int64x2x4_t undef, <2 x i64> %3, 0, 0
>> +  %8 = insertvalue %struct.int64x2x4_t %7, <2 x i64> %4, 0, 1
>> +  %9 = insertvalue %struct.int64x2x4_t %8, <2 x i64> %5, 0, 2
>> +  %10 = insertvalue %struct.int64x2x4_t %9, <2 x i64> %6, 0, 3
>> +  ret %struct.int64x2x4_t %10
>> +}
>> +
>> +define %struct.float32x4x4_t @test_vld1q_f32_x4(float* %a)  {
>> +; CHECK-LABEL: test_vld1q_f32_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s,
>> +; v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast float* %a to i8*
>> +  %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
>> +  %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
>> +  %4 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 1
>> +  %5 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 2
>> +  %6 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 3
>> +  %7 = insertvalue %struct.float32x4x4_t undef, <4 x float> %3, 0, 0
>> +  %8 = insertvalue %struct.float32x4x4_t %7, <4 x float> %4, 0, 1
>> +  %9 = insertvalue %struct.float32x4x4_t %8, <4 x float> %5, 0, 2
>> +  %10 = insertvalue %struct.float32x4x4_t %9, <4 x float> %6, 0, 3
>> +  ret %struct.float32x4x4_t %10
>> +}
>> +
>> +define %struct.float64x2x4_t @test_vld1q_f64_x4(double* %a)  {
>> +; CHECK-LABEL: test_vld1q_f64_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d,
>> +; v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast double* %a to i8*
>> +  %2 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x4.v2f64(i8* %1, i32 8)
>> +  %3 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 0
>> +  %4 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 1
>> +  %5 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 2
>> +  %6 = extractvalue { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %2, 3
>> +  %7 = insertvalue %struct.float64x2x4_t undef, <2 x double> %3, 0, 0
>> +  %8 = insertvalue %struct.float64x2x4_t %7, <2 x double> %4, 0, 1
>> +  %9 = insertvalue %struct.float64x2x4_t %8, <2 x double> %5, 0, 2
>> +  %10 = insertvalue %struct.float64x2x4_t %9, <2 x double> %6, 0, 3
>> +  ret %struct.float64x2x4_t %10
>> +}
>> +
>> +define %struct.int8x8x4_t @test_vld1_s8_x4(i8* %a)  {
>> +; CHECK-LABEL: test_vld1_s8_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b,
>> +; v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
>> +  %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
>> +  %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
>> +  %3 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 1
>> +  %4 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 2
>> +  %5 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 3
>> +  %6 = insertvalue %struct.int8x8x4_t undef, <8 x i8> %2, 0, 0
>> +  %7 = insertvalue %struct.int8x8x4_t %6, <8 x i8> %3, 0, 1
>> +  %8 = insertvalue %struct.int8x8x4_t %7, <8 x i8> %4, 0, 2
>> +  %9 = insertvalue %struct.int8x8x4_t %8, <8 x i8> %5, 0, 3
>> +  ret %struct.int8x8x4_t %9
>> +}
>> +
>> +define %struct.int16x4x4_t @test_vld1_s16_x4(i16* %a)  {
>> +; CHECK-LABEL: test_vld1_s16_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h,
>> +; v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i16* %a to i8*
>> +  %2 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x4.v4i16(i8* %1, i32 2)
>> +  %3 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 0
>> +  %4 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 1
>> +  %5 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 2
>> +  %6 = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %2, 3
>> +  %7 = insertvalue %struct.int16x4x4_t undef, <4 x i16> %3, 0, 0
>> +  %8 = insertvalue %struct.int16x4x4_t %7, <4 x i16> %4, 0, 1
>> +  %9 = insertvalue %struct.int16x4x4_t %8, <4 x i16> %5, 0, 2
>> +  %10 = insertvalue %struct.int16x4x4_t %9, <4 x i16> %6, 0, 3
>> +  ret %struct.int16x4x4_t %10
>> +}
>> +
>> +define %struct.int32x2x4_t @test_vld1_s32_x4(i32* %a)  {
>> +; CHECK-LABEL: test_vld1_s32_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s,
>> +; v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i32* %a to i8*
>> +  %2 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x4.v2i32(i8* %1, i32 4)
>> +  %3 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 0
>> +  %4 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 1
>> +  %5 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 2
>> +  %6 = extractvalue { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %2, 3
>> +  %7 = insertvalue %struct.int32x2x4_t undef, <2 x i32> %3, 0, 0
>> +  %8 = insertvalue %struct.int32x2x4_t %7, <2 x i32> %4, 0, 1
>> +  %9 = insertvalue %struct.int32x2x4_t %8, <2 x i32> %5, 0, 2
>> +  %10 = insertvalue %struct.int32x2x4_t %9, <2 x i32> %6, 0, 3
>> +  ret %struct.int32x2x4_t %10
>> +}
>> +
>> +define %struct.int64x1x4_t @test_vld1_s64_x4(i64* %a)  {
>> +; CHECK-LABEL: test_vld1_s64_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d,
>> +; v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast i64* %a to i8*
>> +  %2 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x4.v1i64(i8* %1, i32 8)
>> +  %3 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 0
>> +  %4 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 1
>> +  %5 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 2
>> +  %6 = extractvalue { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %2, 3
>> +  %7 = insertvalue %struct.int64x1x4_t undef, <1 x i64> %3, 0, 0
>> +  %8 = insertvalue %struct.int64x1x4_t %7, <1 x i64> %4, 0, 1
>> +  %9 = insertvalue %struct.int64x1x4_t %8, <1 x i64> %5, 0, 2
>> +  %10 = insertvalue %struct.int64x1x4_t %9, <1 x i64> %6, 0, 3
>> +  ret %struct.int64x1x4_t %10
>> +}
>> +
>> +define %struct.float32x2x4_t @test_vld1_f32_x4(float* %a)  {
>> +; CHECK-LABEL: test_vld1_f32_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s,
>> +; v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast float* %a to i8*
>> +  %2 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x4.v2f32(i8* %1, i32 4)
>> +  %3 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 0
>> +  %4 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 1
>> +  %5 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 2
>> +  %6 = extractvalue { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %2, 3
>> +  %7 = insertvalue %struct.float32x2x4_t undef, <2 x float> %3, 0, 0
>> +  %8 = insertvalue %struct.float32x2x4_t %7, <2 x float> %4, 0, 1
>> +  %9 = insertvalue %struct.float32x2x4_t %8, <2 x float> %5, 0, 2
>> +  %10 = insertvalue %struct.float32x2x4_t %9, <2 x float> %6, 0, 3
>> +  ret %struct.float32x2x4_t %10
>> +}
>> +
>> +
>> +define %struct.float64x1x4_t @test_vld1_f64_x4(double* %a)  {
>> +; CHECK-LABEL: test_vld1_f64_x4
>> +; CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d,
>> +; v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = bitcast double* %a to i8*
>> +  %2 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x4.v1f64(i8* %1, i32 8)
>> +  %3 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 0
>> +  %4 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 1
>> +  %5 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 2
>> +  %6 = extractvalue { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %2, 3
>> +  %7 = insertvalue %struct.float64x1x4_t undef, <1 x double> %3, 0, 0
>> +  %8 = insertvalue %struct.float64x1x4_t %7, <1 x double> %4, 0, 1
>> +  %9 = insertvalue %struct.float64x1x4_t %8, <1 x double> %5, 0, 2
>> +  %10 = insertvalue %struct.float64x1x4_t %9, <1 x double> %6, 0, 3
>> +  ret %struct.float64x1x4_t %10
>> +}
>> +
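
The _x3 and _x4 loads above follow the same pattern with wider register
lists. A corresponding C-level sketch for the four-vector form, again
assuming arm_neon.h provides the _x4 intrinsics:

    #include <arm_neon.h>

    /* Loads 4 x <2 x float> (32 bytes) with a single
       "ld1 {vN.2s, vN+1.2s, vN+2.2s, vN+3.2s}, [xP]". */
    float32x2x4_t load_quad(const float *p) {
      return vld1_f32_x4(p);
    }
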
>> +define void @test_vst1q_s8_x2(i8* %a, [2 x <16 x i8>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s8_x2
>> +; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <16 x i8>] %b, 0
>> +  %2 = extractvalue [2 x <16 x i8>] %b, 1
>> +  tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s16_x2(i16* %a, [2 x <8 x i16>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s16_x2
>> +; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <8 x i16>] %b, 0
>> +  %2 = extractvalue [2 x <8 x i16>] %b, 1
>> +  %3 = bitcast i16* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s32_x2(i32* %a, [2 x <4 x i32>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s32_x2
>> +; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <4 x i32>] %b, 0
>> +  %2 = extractvalue [2 x <4 x i32>] %b, 1
>> +  %3 = bitcast i32* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v4i32(i8* %3, <4 x i32> %1, <4 x i32> %2, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s64_x2(i64* %a, [2 x <2 x i64>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s64_x2
>> +; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <2 x i64>] %b, 0
>> +  %2 = extractvalue [2 x <2 x i64>] %b, 1
>> +  %3 = bitcast i64* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v2i64(i8* %3, <2 x i64> %1, <2 x i64> %2, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_f32_x2(float* %a, [2 x <4 x float>] %b)  {
>> +; CHECK-LABEL: test_vst1q_f32_x2
>> +; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <4 x float>] %b, 0
>> +  %2 = extractvalue [2 x <4 x float>] %b, 1
>> +  %3 = bitcast float* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v4f32(i8* %3, <4 x float> %1, <4 x float> %2, i32 4)
>> +  ret void
>> +}
>> +
>> +
>> +define void @test_vst1q_f64_x2(double* %a, [2 x <2 x double>] %b)  {
>> +; CHECK-LABEL: test_vst1q_f64_x2
>> +; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <2 x double>] %b, 0
>> +  %2 = extractvalue [2 x <2 x double>] %b, 1
>> +  %3 = bitcast double* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v2f64(i8* %3, <2 x double> %1, <2 x double> %2, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s8_x2(i8* %a, [2 x <8 x i8>] %b)  {
>> +; CHECK-LABEL: test_vst1_s8_x2
>> +; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <8 x i8>] %b, 0
>> +  %2 = extractvalue [2 x <8 x i8>] %b, 1
>> +  tail call void @llvm.aarch64.neon.vst1x2.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, i32 1)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s16_x2(i16* %a, [2 x <4 x i16>] %b)  {
>> +; CHECK-LABEL: test_vst1_s16_x2
>> +; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <4 x i16>] %b, 0
>> +  %2 = extractvalue [2 x <4 x i16>] %b, 1
>> +  %3 = bitcast i16* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v4i16(i8* %3, <4 x i16> %1, <4 x i16> %2, i32 2)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s32_x2(i32* %a, [2 x <2 x i32>] %b)  {
>> +; CHECK-LABEL: test_vst1_s32_x2
>> +; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <2 x i32>] %b, 0
>> +  %2 = extractvalue [2 x <2 x i32>] %b, 1
>> +  %3 = bitcast i32* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v2i32(i8* %3, <2 x i32> %1, <2 x i32> %2, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s64_x2(i64* %a, [2 x <1 x i64>] %b)  {
>> +; CHECK-LABEL: test_vst1_s64_x2
>> +; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <1 x i64>] %b, 0
>> +  %2 = extractvalue [2 x <1 x i64>] %b, 1
>> +  %3 = bitcast i64* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v1i64(i8* %3, <1 x i64> %1, <1 x i64> %2, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_f32_x2(float* %a, [2 x <2 x float>] %b)  {
>> +; CHECK-LABEL: test_vst1_f32_x2
>> +; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <2 x float>] %b, 0
>> +  %2 = extractvalue [2 x <2 x float>] %b, 1
>> +  %3 = bitcast float* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v2f32(i8* %3, <2 x float> %1, <2 x float> %2, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_f64_x2(double* %a, [2 x <1 x double>] %b)  {
>> +; CHECK-LABEL: test_vst1_f64_x2
>> +; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [2 x <1 x double>] %b, 0
>> +  %2 = extractvalue [2 x <1 x double>] %b, 1
>> +  %3 = bitcast double* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v1f64(i8* %3, <1 x double> %1, <1 x double> %2, i32 8)
>> +  ret void
>> +}
>> +
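
The vst1xN store tests mirror the loads. A minimal C-level sketch of the
two-vector store form exercised above (names per the ACLE spec, assuming
the toolchain exposes them):

    #include <arm_neon.h>

    /* Stores both vectors of the pair with a single
       "st1 {vN.4s, vM.4s}, [xP]". */
    void store_pair(int32_t *p, int32x4x2_t v) {
      vst1q_s32_x2(p, v);
    }
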
>> +define void @test_vst1q_s8_x3(i8* %a, [3 x <16 x i8>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s8_x3
>> +; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <16 x i8>] %b, 0
>> +  %2 = extractvalue [3 x <16 x i8>] %b, 1
>> +  %3 = extractvalue [3 x <16 x i8>] %b, 2
>> +  tail call void @llvm.aarch64.neon.vst1x3.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, <16 x i8> %3, i32 1)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s16_x3(i16* %a, [3 x <8 x i16>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s16_x3
>> +; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <8 x i16>] %b, 0
>> +  %2 = extractvalue [3 x <8 x i16>] %b, 1
>> +  %3 = extractvalue [3 x <8 x i16>] %b, 2
>> +  %4 = bitcast i16* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v8i16(i8* %4, <8 x i16> %1, <8 x i16> %2, <8 x i16> %3, i32 2)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s32_x3(i32* %a, [3 x <4 x i32>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s32_x3
>> +; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <4 x i32>] %b, 0
>> +  %2 = extractvalue [3 x <4 x i32>] %b, 1
>> +  %3 = extractvalue [3 x <4 x i32>] %b, 2
>> +  %4 = bitcast i32* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v4i32(i8* %4, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s64_x3(i64* %a, [3 x <2 x i64>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s64_x3
>> +; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <2 x i64>] %b, 0
>> +  %2 = extractvalue [3 x <2 x i64>] %b, 1
>> +  %3 = extractvalue [3 x <2 x i64>] %b, 2
>> +  %4 = bitcast i64* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v2i64(i8* %4, <2 x i64> %1, <2 x i64> %2, <2 x i64> %3, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_f32_x3(float* %a, [3 x <4 x float>] %b)  {
>> +; CHECK-LABEL: test_vst1q_f32_x3
>> +; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <4 x float>] %b, 0
>> +  %2 = extractvalue [3 x <4 x float>] %b, 1
>> +  %3 = extractvalue [3 x <4 x float>] %b, 2
>> +  %4 = bitcast float* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v4f32(i8* %4, <4 x float> %1, <4 x float> %2, <4 x float> %3, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_f64_x3(double* %a, [3 x <2 x double>] %b)  {
>> +; CHECK-LABEL: test_vst1q_f64_x3
>> +; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <2 x double>] %b, 0
>> +  %2 = extractvalue [3 x <2 x double>] %b, 1
>> +  %3 = extractvalue [3 x <2 x double>] %b, 2
>> +  %4 = bitcast double* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v2f64(i8* %4, <2 x double> %1, <2 x double> %2, <2 x double> %3, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s8_x3(i8* %a, [3 x <8 x i8>] %b)  {
>> +; CHECK-LABEL: test_vst1_s8_x3
>> +; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <8 x i8>] %b, 0
>> +  %2 = extractvalue [3 x <8 x i8>] %b, 1
>> +  %3 = extractvalue [3 x <8 x i8>] %b, 2
>> +  tail call void @llvm.aarch64.neon.vst1x3.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, <8 x i8> %3, i32 1)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s16_x3(i16* %a, [3 x <4 x i16>] %b)  {
>> +; CHECK-LABEL: test_vst1_s16_x3
>> +; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <4 x i16>] %b, 0
>> +  %2 = extractvalue [3 x <4 x i16>] %b, 1
>> +  %3 = extractvalue [3 x <4 x i16>] %b, 2
>> +  %4 = bitcast i16* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v4i16(i8* %4, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, i32 2)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s32_x3(i32* %a, [3 x <2 x i32>] %b)  {
>> +; CHECK-LABEL: test_vst1_s32_x3
>> +; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s},
>> +; [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <2 x i32>] %b, 0
>> +  %2 = extractvalue [3 x <2 x i32>] %b, 1
>> +  %3 = extractvalue [3 x <2 x i32>] %b, 2
>> +  %4 = bitcast i32* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s64_x3(i64* %a, [3 x <1 x i64>] %b)  {
>> +; CHECK-LABEL: test_vst1_s64_x3
>> +; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <1 x i64>] %b, 0
>> +  %2 = extractvalue [3 x <1 x i64>] %b, 1
>> +  %3 = extractvalue [3 x <1 x i64>] %b, 2
>> +  %4 = bitcast i64* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_f32_x3(float* %a, [3 x <2 x float>] %b)  {
>> +; CHECK-LABEL: test_vst1_f32_x3
>> +; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <2 x float>] %b, 0
>> +  %2 = extractvalue [3 x <2 x float>] %b, 1
>> +  %3 = extractvalue [3 x <2 x float>] %b, 2
>> +  %4 = bitcast float* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v2f32(i8* %4, <2 x float> %1, <2 x float> %2, <2 x float> %3, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_f64_x3(double* %a, [3 x <1 x double>] %b)  {
>> +; CHECK-LABEL: test_vst1_f64_x3
>> +; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [3 x <1 x double>] %b, 0
>> +  %2 = extractvalue [3 x <1 x double>] %b, 1
>> +  %3 = extractvalue [3 x <1 x double>] %b, 2
>> +  %4 = bitcast double* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v1f64(i8* %4, <1 x double> %1, <1 x double> %2, <1 x double> %3, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s8_x4(i8* %a, [4 x <16 x i8>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s8_x4
>> +; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <16 x i8>] %b, 0
>> +  %2 = extractvalue [4 x <16 x i8>] %b, 1
>> +  %3 = extractvalue [4 x <16 x i8>] %b, 2
>> +  %4 = extractvalue [4 x <16 x i8>] %b, 3
>> +  tail call void @llvm.aarch64.neon.vst1x4.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, <16 x i8> %3, <16 x i8> %4, i32 1)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s16_x4(i16* %a, [4 x <8 x i16>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s16_x4
>> +; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <8 x i16>] %b, 0
>> +  %2 = extractvalue [4 x <8 x i16>] %b, 1
>> +  %3 = extractvalue [4 x <8 x i16>] %b, 2
>> +  %4 = extractvalue [4 x <8 x i16>] %b, 3
>> +  %5 = bitcast i16* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v8i16(i8* %5, <8 x i16> %1, <8 x i16> %2, <8 x i16> %3, <8 x i16> %4, i32 2)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s32_x4(i32* %a, [4 x <4 x i32>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s32_x4
>> +; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <4 x i32>] %b, 0
>> +  %2 = extractvalue [4 x <4 x i32>] %b, 1
>> +  %3 = extractvalue [4 x <4 x i32>] %b, 2
>> +  %4 = extractvalue [4 x <4 x i32>] %b, 3
>> +  %5 = bitcast i32* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v4i32(i8* %5, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_s64_x4(i64* %a, [4 x <2 x i64>] %b)  {
>> +; CHECK-LABEL: test_vst1q_s64_x4
>> +; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <2 x i64>] %b, 0
>> +  %2 = extractvalue [4 x <2 x i64>] %b, 1
>> +  %3 = extractvalue [4 x <2 x i64>] %b, 2
>> +  %4 = extractvalue [4 x <2 x i64>] %b, 3
>> +  %5 = bitcast i64* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v2i64(i8* %5, <2 x i64> %1, <2 x i64> %2, <2 x i64> %3, <2 x i64> %4, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_f32_x4(float* %a, [4 x <4 x float>] %b)  {
>> +; CHECK-LABEL: test_vst1q_f32_x4
>> +; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <4 x float>] %b, 0
>> +  %2 = extractvalue [4 x <4 x float>] %b, 1
>> +  %3 = extractvalue [4 x <4 x float>] %b, 2
>> +  %4 = extractvalue [4 x <4 x float>] %b, 3
>> +  %5 = bitcast float* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1q_f64_x4(double* %a, [4 x <2 x double>] %b)  {
>> +; CHECK-LABEL: test_vst1q_f64_x4
>> +; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <2 x double>] %b, 0
>> +  %2 = extractvalue [4 x <2 x double>] %b, 1
>> +  %3 = extractvalue [4 x <2 x double>] %b, 2
>> +  %4 = extractvalue [4 x <2 x double>] %b, 3
>> +  %5 = bitcast double* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s8_x4(i8* %a, [4 x <8 x i8>] %b)  {
>> +; CHECK-LABEL: test_vst1_s8_x4
>> +; CHECK: st1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <8 x i8>] %b, 0
>> +  %2 = extractvalue [4 x <8 x i8>] %b, 1
>> +  %3 = extractvalue [4 x <8 x i8>] %b, 2
>> +  %4 = extractvalue [4 x <8 x i8>] %b, 3
>> +  tail call void @llvm.aarch64.neon.vst1x4.v8i8(i8* %a, <8 x i8> %1, <8 x i8> %2, <8 x i8> %3, <8 x i8> %4, i32 1)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s16_x4(i16* %a, [4 x <4 x i16>] %b)  {
>> +; CHECK-LABEL: test_vst1_s16_x4
>> +; CHECK: st1 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <4 x i16>] %b, 0
>> +  %2 = extractvalue [4 x <4 x i16>] %b, 1
>> +  %3 = extractvalue [4 x <4 x i16>] %b, 2
>> +  %4 = extractvalue [4 x <4 x i16>] %b, 3
>> +  %5 = bitcast i16* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v4i16(i8* %5, <4 x i16> %1, <4 x i16> %2, <4 x i16> %3, <4 x i16> %4, i32 2)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s32_x4(i32* %a, [4 x <2 x i32>] %b)  {
>> +; CHECK-LABEL: test_vst1_s32_x4
>> +; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <2 x i32>] %b, 0
>> +  %2 = extractvalue [4 x <2 x i32>] %b, 1
>> +  %3 = extractvalue [4 x <2 x i32>] %b, 2
>> +  %4 = extractvalue [4 x <2 x i32>] %b, 3
>> +  %5 = bitcast i32* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v2i32(i8* %5, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, <2 x i32> %4, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_s64_x4(i64* %a, [4 x <1 x i64>] %b)  {
>> +; CHECK-LABEL: test_vst1_s64_x4
>> +; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <1 x i64>] %b, 0
>> +  %2 = extractvalue [4 x <1 x i64>] %b, 1
>> +  %3 = extractvalue [4 x <1 x i64>] %b, 2
>> +  %4 = extractvalue [4 x <1 x i64>] %b, 3
>> +  %5 = bitcast i64* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v1i64(i8* %5, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, <1 x i64> %4, i32 8)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_f32_x4(float* %a, [4 x <2 x float>] %b)  {
>> +; CHECK-LABEL: test_vst1_f32_x4
>> +; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <2 x float>] %b, 0
>> +  %2 = extractvalue [4 x <2 x float>] %b, 1
>> +  %3 = extractvalue [4 x <2 x float>] %b, 2
>> +  %4 = extractvalue [4 x <2 x float>] %b, 3
>> +  %5 = bitcast float* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v2f32(i8* %5, <2 x float> %1, <2 x float> %2, <2 x float> %3, <2 x float> %4, i32 4)
>> +  ret void
>> +}
>> +
>> +define void @test_vst1_f64_x4(double* %a, [4 x <1 x double>] %b)  {
>> +; CHECK-LABEL: test_vst1_f64_x4
>> +; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
>> +  %1 = extractvalue [4 x <1 x double>] %b, 0
>> +  %2 = extractvalue [4 x <1 x double>] %b, 1
>> +  %3 = extractvalue [4 x <1 x double>] %b, 2
>> +  %4 = extractvalue [4 x <1 x double>] %b, 3
>> +  %5 = bitcast double* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v1f64(i8* %5, <1 x double> %1, <1 x double> %2, <1 x double> %3, <1 x double> %4, i32 8)
>> +  ret void
>> +}
>> +
>> +declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
>> +declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
>> +declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8*, i32)
>> +declare { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x2.v2i64(i8*, i32)
>> +declare { <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x2.v4f32(i8*, i32)
>> +declare { <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x2.v2f64(i8*, i32)
>> +declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x2.v8i8(i8*, i32)
>> +declare { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x2.v4i16(i8*, i32)
>> +declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x2.v2i32(i8*, i32)
>> +declare { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x2.v1i64(i8*, i32)
>> +declare { <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x2.v2f32(i8*, i32)
>> +declare { <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x2.v1f64(i8*, i32)
>> +declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x3.v16i8(i8*, i32)
>> +declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
>> +declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x3.v4i32(i8*, i32)
>> +declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
>> +declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x3.v4f32(i8*, i32)
>> +declare { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x3.v2f64(i8*, i32)
>> +declare { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x3.v8i8(i8*, i32)
>> +declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x3.v4i16(i8*, i32)
>> +declare { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x3.v2i32(i8*, i32)
>> +declare { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x3.v1i64(i8*, i32)
>> +declare { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x3.v2f32(i8*, i32)
>> +declare { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x3.v1f64(i8*, i32)
>> +declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x4.v16i8(i8*, i32)
>> +declare { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x4.v8i16(i8*, i32)
>> +declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x4.v4i32(i8*, i32)
>> +declare { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x4.v2i64(i8*, i32)
>> +declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
>> +declare { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.vld1x4.v2f64(i8*, i32)
>> +declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
>> +declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.vld1x4.v4i16(i8*, i32)
>> +declare { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.vld1x4.v2i32(i8*, i32)
>> +declare { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.vld1x4.v1i64(i8*, i32)
>> +declare { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.vld1x4.v2f32(i8*, i32)
>> +declare { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.vld1x4.v1f64(i8*, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v4i32(i8*, <4 x i32>, <4 x i32>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v2i64(i8*, <2 x i64>, <2 x i64>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v4f32(i8*, <4 x float>, <4 x float>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v2f64(i8*, <2 x double>, <2 x double>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v8i8(i8*, <8 x i8>, <8 x i8>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v4i16(i8*, <4 x i16>, <4 x i16>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v2i32(i8*, <2 x i32>, <2 x i32>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v1i64(i8*, <1 x i64>, <1 x i64>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v2f32(i8*, <2 x float>, <2 x float>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v1f64(i8*, <1 x double>, <1 x double>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v4i32(i8*, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v2i64(i8*, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v1f64(i8*, <1 x double>, <1 x double>, <1 x double>, <1 x double>, i32)
>>
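[Aside, for anyone skimming the patch: the shape of the mapping is easy to read off the declarations above. Every vld1xN/vst1xN intrinsic takes the base pointer as a plain i8* plus a trailing i32 that the tests set to the element size (presumably the alignment), and a vld1xN returns its N vectors as one struct. A minimal sketch of a caller, reusing the vld1x2.v4i32 declaration from this test file -- the function and value names below are mine, not part of the patch:

  define <4 x i32> @load_first_of_pair(i32* %p) {
    ; bitcast the typed pointer to i8* and pass the element alignment,
    ; exactly as the tests above do
    %addr = bitcast i32* %p to i8*
    %pair = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8* %addr, i32 4)
    ; the two vectors come back as one struct; take the first
    %first = extractvalue { <4 x i32>, <4 x i32> } %pair, 0
    ret <4 x i32> %first
  }

  declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.vld1x2.v4i32(i8*, i32)

If I read the patch right, this should select to a single "ld1 {vA.4s, vB.4s}, [x0]".]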
>> Modified: llvm/trunk/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll?rev=194990&r1=194989&r2=194990&view=diff
>>
>> ==============================================================================
>> --- llvm/trunk/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll (original)
>> +++ llvm/trunk/test/CodeGen/AArch64/neon-simd-post-ldst-multi-elem.ll Mon Nov 18 00:31:53 2013
>> @@ -1,5 +1,6 @@
>>  ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
>>
>> +;Check for a post-increment updating load.
>>  define <4 x i16> @test_vld1_fx_update(i16** %ptr) nounwind {
>>  ; CHECK: test_vld1_fx_update
>>  ; CHECK: ld1 {v{{[0-9]+}}.4h}, [x{{[0-9]+|sp}}], #8
>> @@ -11,6 +12,7 @@ define <4 x i16> @test_vld1_fx_update(i1
>>    ret <4 x i16> %tmp1
>>  }
>>
>> +;Check for a post-increment updating load with register increment.
>>  define <2 x i32> @test_vld1_reg_update(i32** %ptr, i32 %inc) nounwind {
>>  ; CHECK: test_vld1_reg_update
>>  ; CHECK: ld1 {v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
>> @@ -81,7 +83,6 @@ define <8 x i16> @test_vld4_fx_update(i1
>>    ret <8 x i16> %tmp2
>>  }
>>
>> -;Check for a post-increment updating load with register increment.
>>  define <8 x i8> @test_vld4_reg_update(i8** %ptr, i32 %inc) nounwind {
>>  ; CHECK: test_vld4_reg_update
>>  ; CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
>> @@ -93,7 +94,6 @@ define <8 x i8> @test_vld4_reg_update(i8
>>    ret <8 x i8> %tmp1
>>  }
>>
>> -;Check for a post-increment updating store.
>>  define void @test_vst1_fx_update(float** %ptr, <2 x float> %B) nounwind {
>>  ; CHECK: test_vst1_fx_update
>>  ; CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}], #8
>> @@ -198,3 +198,157 @@ declare void @llvm.arm.neon.vst3.v2i32(i
>>  declare void @llvm.arm.neon.vst3.v8i16(i8*, <8 x i16>, <8 x i16>, <8 x i16>, i32)
>>  declare void @llvm.arm.neon.vst4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32)
>>  declare void @llvm.arm.neon.vst4.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32)
>> +
>> +define <16 x i8> @test_vld1x2_fx_update(i8* %a, i8** %ptr) {
>> +; CHECK: test_vld1x2_fx_update
>> +; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
>> +  %1 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8* %a, i32 1)
>> +  %2 = extractvalue { <16 x i8>, <16 x i8> } %1, 0
>> +  %tmp1 = getelementptr i8* %a, i32 32
>> +  store i8* %tmp1, i8** %ptr
>> +  ret <16 x i8> %2
>> +}
>> +
>> +define <8 x i16> @test_vld1x2_reg_update(i16* %a, i16** %ptr, i32 %inc) {
>> +; CHECK: test_vld1x2_reg_update
>> +; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
>> +  %1 = bitcast i16* %a to i8*
>> +  %2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8* %1, i32 2)
>> +  %3 = extractvalue { <8 x i16>, <8 x i16> } %2, 0
>> +  %tmp1 = getelementptr i16* %a, i32 %inc
>> +  store i16* %tmp1, i16** %ptr
>> +  ret <8 x i16> %3
>> +}
>> +
>> +define <2 x i64> @test_vld1x3_fx_update(i64* %a, i64** %ptr) {
>> +; CHECK: test_vld1x3_fx_update
>> +; CHECK: ld1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], #48
>> +  %1 = bitcast i64* %a to i8*
>> +  %2 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8* %1, i32 8)
>> +  %3 = extractvalue { <2 x i64>, <2 x i64>, <2 x i64> } %2, 0
>> +  %tmp1 = getelementptr i64* %a, i32 6
>> +  store i64* %tmp1, i64** %ptr
>> +  ret  <2 x i64> %3
>> +}
>> +
>> +define <8 x i16> @test_vld1x3_reg_update(i16* %a, i16** %ptr, i32 %inc) {
>> +; CHECK: test_vld1x3_reg_update
>> +; CHECK: ld1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
>> +  %1 = bitcast i16* %a to i8*
>> +  %2 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8* %1, i32 2)
>> +  %3 = extractvalue { <8 x i16>, <8 x i16>, <8 x i16> } %2, 0
>> +  %tmp1 = getelementptr i16* %a, i32 %inc
>> +  store i16* %tmp1, i16** %ptr
>> +  ret <8 x i16> %3
>> +}
>> +
>> +define <4 x float> @test_vld1x4_fx_update(float* %a, float** %ptr) {
>> +; CHECK: test_vld1x4_fx_update
>> +; CHECK: ld1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
>> +  %1 = bitcast float* %a to i8*
>> +  %2 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8* %1, i32 4)
>> +  %3 = extractvalue { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %2, 0
>> +  %tmp1 = getelementptr float* %a, i32 16
>> +  store float* %tmp1, float** %ptr
>> +  ret <4 x float> %3
>> +}
>> +
>> +define <8 x i8> @test_vld1x4_reg_update(i8* readonly %a, i8** %ptr, i32 %inc) #0 {
>> +; CHECK: test_vld1x4_reg_update
>> +; CHECK: ld1 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
>> +  %1 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8* %a, i32 1)
>> +  %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
>> +  %tmp1 = getelementptr i8* %a, i32 %inc
>> +  store i8* %tmp1, i8** %ptr
>> +  ret <8 x i8> %2
>> +}
>> +
>> +define void @test_vst1x2_fx_update(i8* %a, [2 x <16 x i8>] %b.coerce, i8** %ptr) #2 {
>> +; CHECK: test_vst1x2_fx_update
>> +; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [x{{[0-9]+|sp}}], #32
>> +  %1 = extractvalue [2 x <16 x i8>] %b.coerce, 0
>> +  %2 = extractvalue [2 x <16 x i8>] %b.coerce, 1
>> +  tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %a, <16 x i8> %1, <16 x i8> %2, i32 1)
>> +  %tmp1 = getelementptr i8* %a, i32 32
>> +  store i8* %tmp1, i8** %ptr
>> +  ret void
>> +}
>> +
>> +define void @test_vst1x2_reg_update(i16* %a, [2 x <8 x i16>] %b.coerce, i16** %ptr, i32 %inc) #2 {
>> +; CHECK: test_vst1x2_reg_update
>> +; CHECK: st1 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
>> +  %1 = extractvalue [2 x <8 x i16>] %b.coerce, 0
>> +  %2 = extractvalue [2 x <8 x i16>] %b.coerce, 1
>> +  %3 = bitcast i16* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x2.v8i16(i8* %3, <8 x i16> %1, <8 x i16> %2, i32 2)
>> +  %tmp1 = getelementptr i16* %a, i32 %inc
>> +  store i16* %tmp1, i16** %ptr
>> +  ret void
>> +}
>> +
>> +define void @test_vst1x3_fx_update(i32* %a, [3 x <2 x i32>] %b.coerce, i32** %ptr) #2 {
>> +; CHECK: test_vst1x3_fx_update
>> +; CHECK: st1 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [x{{[0-9]+|sp}}], #24
>> +  %1 = extractvalue [3 x <2 x i32>] %b.coerce, 0
>> +  %2 = extractvalue [3 x <2 x i32>] %b.coerce, 1
>> +  %3 = extractvalue [3 x <2 x i32>] %b.coerce, 2
>> +  %4 = bitcast i32* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v2i32(i8* %4, <2 x i32> %1, <2 x i32> %2, <2 x i32> %3, i32 4)
>> +  %tmp1 = getelementptr i32* %a, i32 6
>> +  store i32* %tmp1, i32** %ptr
>> +  ret void
>> +}
>> +
>> +define void @test_vst1x3_reg_update(i64* %a, [3 x <1 x i64>] %b.coerce, i64** %ptr, i32 %inc) #2 {
>> +; CHECK: test_vst1x3_reg_update
>> +; CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
>> +  %1 = extractvalue [3 x <1 x i64>] %b.coerce, 0
>> +  %2 = extractvalue [3 x <1 x i64>] %b.coerce, 1
>> +  %3 = extractvalue [3 x <1 x i64>] %b.coerce, 2
>> +  %4 = bitcast i64* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x3.v1i64(i8* %4, <1 x i64> %1, <1 x i64> %2, <1 x i64> %3, i32 8)
>> +  %tmp1 = getelementptr i64* %a, i32 %inc
>> +  store i64* %tmp1, i64** %ptr
>> +  ret void
>> +}
>> +
>> +define void @test_vst1x4_fx_update(float* %a, [4 x <4 x float>] %b.coerce, float** %ptr) #2 {
>> +; CHECK: test_vst1x4_fx_update
>> +; CHECK: st1 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [x{{[0-9]+|sp}}], #64
>> +  %1 = extractvalue [4 x <4 x float>] %b.coerce, 0
>> +  %2 = extractvalue [4 x <4 x float>] %b.coerce, 1
>> +  %3 = extractvalue [4 x <4 x float>] %b.coerce, 2
>> +  %4 = extractvalue [4 x <4 x float>] %b.coerce, 3
>> +  %5 = bitcast float* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v4f32(i8* %5, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, i32 4)
>> +  %tmp1 = getelementptr float* %a, i32 16
>> +  store float* %tmp1, float** %ptr
>> +  ret void
>> +}
>> +
>> +define void @test_vst1x4_reg_update(double* %a, [4 x <2 x double>] %b.coerce, double** %ptr, i32 %inc) #2 {
>> +; CHECK: test_vst1x4_reg_update
>> +; CHECK: st1 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [x{{[0-9]+|sp}}], x{{[0-9]+}}
>> +  %1 = extractvalue [4 x <2 x double>] %b.coerce, 0
>> +  %2 = extractvalue [4 x <2 x double>] %b.coerce, 1
>> +  %3 = extractvalue [4 x <2 x double>] %b.coerce, 2
>> +  %4 = extractvalue [4 x <2 x double>] %b.coerce, 3
>> +  %5 = bitcast double* %a to i8*
>> +  tail call void @llvm.aarch64.neon.vst1x4.v2f64(i8* %5, <2 x double> %1, <2 x double> %2, <2 x double> %3, <2 x double> %4, i32 8)
>> +  %tmp1 = getelementptr double* %a, i32 %inc
>> +  store double* %tmp1, double** %ptr
>> +  ret void
>> +}
>> +
>> +declare { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.vld1x2.v16i8(i8*, i32)
>> +declare { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x2.v8i16(i8*, i32)
>> +declare { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.vld1x3.v2i64(i8*, i32)
>> +declare { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.vld1x3.v8i16(i8*, i32)
>> +declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.vld1x4.v4f32(i8*, i32)
>> +declare { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.vld1x4.v8i8(i8*, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)
>> +declare void @llvm.aarch64.neon.vst1x2.v8i16(i8*, <8 x i16>, <8 x i16>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32)
>> +declare void @llvm.aarch64.neon.vst1x3.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32)
>> +declare void @llvm.aarch64.neon.vst1x4.v4f32(i8*, <4 x float>, <4 x float>, <4 x float>, <4 x float>, i32) #3
>> +declare void @llvm.aarch64.neon.vst1x4.v2f64(i8*, <2 x double>, <2 x double>, <2 x double>, <2 x double>, i32) #3
>>
>>
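One detail of the post-index tests above worth spelling out: there is no separate "updating" form of these intrinsics. Each *_fx_update/*_reg_update test performs a plain vld1xN/vst1xN call and then writes the advanced base pointer back through ordinary IR (getelementptr + store), and instruction selection is expected to fold that increment into the post-indexed addressing mode the CHECK lines look for (", #imm" for the fixed stride, ", xM" for a register stride). A sketch of the shape, modelled on test_vst1x2_fx_update; the function and value names are mine, not part of the patch:

  define void @store_pair_then_advance(i8* %base, <16 x i8> %lo, <16 x i8> %hi, i8** %out) {
    ; plain two-vector store; the intrinsic itself encodes no writeback
    tail call void @llvm.aarch64.neon.vst1x2.v16i8(i8* %base, <16 x i8> %lo, <16 x i8> %hi, i32 1)
    ; advance by the 32 bytes just stored; ISel should fold this into
    ; "st1 {vA.16b, vB.16b}, [x0], #32"
    %next = getelementptr i8* %base, i32 32
    store i8* %next, i8** %out
    ret void
  }

  declare void @llvm.aarch64.neon.vst1x2.v16i8(i8*, <16 x i8>, <16 x i8>, i32)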
>> _______________________________________________
>> llvm-commits mailing list
>> llvm-commits at cs.uiuc.edu
>> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>>


-- 
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
hosted by The Linux Foundation