[llvm-commits] [llvm-gcc-4.2] r76781 [3/5] - in /llvm-gcc-4.2/trunk: ./ gcc/ gcc/config/ gcc/config/arm/ gcc/config/rs6000/ gcc/cp/ gcc/doc/ gcc/testsuite/g++.apple/ gcc/testsuite/g++.dg/abi/ gcc/testsuite/gcc.apple/ gcc/testsuite/gcc.target/arm/ gcc/testsuite/gcc.target/arm/neon/ gcc/testsuite/obj-c++.dg/ gcc/testsuite/objc.dg/
Bob Wilson
bob.wilson at apple.com
Wed Jul 22 13:36:46 PDT 2009
Added: llvm-gcc-4.2/trunk/gcc/config/arm/thumb2.md
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/thumb2.md?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/thumb2.md (added)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/thumb2.md Wed Jul 22 15:36:27 2009
@@ -0,0 +1,1164 @@
+;; APPLE LOCAL file v7 support. Merge from mainline
+;; ARM Thumb-2 Machine Description
+;; Copyright (C) 2007 Free Software Foundation, Inc.
+;; Written by CodeSourcery, LLC.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+;; 02111-1307, USA. */
+
+;; Note: Thumb-2 is the variant of the Thumb architecture that adds
+;; 32-bit encodings of [almost all of] the Arm instruction set.
+;; Some old documents refer to the relatively minor interworking
+;; changes made in armv5t as "thumb2". These are considered part of
+;; the 16-bit Thumb-1 instruction set.
+
+(include "hwdiv.md")
+
+(define_insn "*thumb2_incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand:CC 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ "TARGET_THUMB2"
+ "@
+ it\\t%d2\;add%d2\\t%0, %1, #1
+ ite\\t%D2\;mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,10")]
+)
+
+(define_insn "*thumb2_decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ "TARGET_THUMB2"
+ "@
+ it\\t%d2\;sub%d2\\t%0, %1, #1
+ ite\\t%D2\;mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,10")]
+)
+
+;; Thumb-2 only allows shift by constant on data processing instructions
+(define_insn "*thumb_andsi_not_shiftsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "const_int_operand" "M")]))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_ARM"
+ "bic%?\\t%0, %1, %2%S4"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "2")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_smaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%1, %2\;it\\tlt\;movlt\\t%0, %2
+ cmp\\t%1, %2\;it\\tge\;movge\\t%0, %1
+ cmp\\t%1, %2\;ite\\tge\;movge\\t%0, %1\;movlt\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+(define_insn "*thumb2_sminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%1, %2\;it\\tge\;movge\\t%0, %2
+ cmp\\t%1, %2\;it\\tlt\;movlt\\t%0, %1
+ cmp\\t%1, %2\;ite\\tlt\;movlt\\t%0, %1\;movge\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+(define_insn "*thumb32_umaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%1, %2\;it\\tcc\;movcc\\t%0, %2
+ cmp\\t%1, %2\;it\\tcs\;movcs\\t%0, %1
+ cmp\\t%1, %2\;ite\\tcs\;movcs\\t%0, %1\;movcc\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+(define_insn "*thumb2_uminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%1, %2\;it\\tcs\;movcs\\t%0, %2
+ cmp\\t%1, %2\;it\\tcc\;movcc\\t%0, %1
+ cmp\\t%1, %2\;ite\\tcc\;movcc\\t%0, %1\;movcs\\t%0, %2"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+(define_insn "*thumb2_notsi_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")])))]
+ "TARGET_THUMB2"
+ "mvn%?\\t%0, %1%S3"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_notsi_shiftsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
+ "TARGET_THUMB2"
+ "mvn%.\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_not_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_THUMB2"
+ "mvn%.\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+;; Thumb-2 does not have rsc, so use a clever trick with shifter operands.
+(define_insn "*thumb2_negdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,r")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "negs\\t%Q0, %Q1\;sbc\\t%R0, %R1, %R1, lsl #1"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
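+
+;; A brief sketch of why the trick above works (illustrative only, not
+;; part of the original pattern): "negs %Q0, %Q1" computes the low word
+;; as 0 - %Q1 and sets the carry flag C exactly when %Q1 is zero (i.e.
+;; no borrow occurred).  "sbc %R0, %R1, %R1, lsl #1" then computes
+;; %R1 - (%R1 << 1) - !C = -%R1 - !C, which is precisely the high word
+;; of the 64-bit negation, so no rsc instruction is required.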
+
+(define_insn "*thumb2_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%0, #0\;it\tlt\;rsblt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
+ [(set_attr "conds" "clob,*")
+ (set_attr "shift" "1")
+ ;; predicable can't be set based on the variant, so left as no
+ (set_attr "length" "10,8")]
+)
+
+(define_insn "*thumb2_neg_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "@
+ cmp\\t%0, #0\;it\\tgt\;rsbgt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
+ [(set_attr "conds" "clob,*")
+ (set_attr "shift" "1")
+ ;; predicable can't be set based on the variant, so left as no
+ (set_attr "length" "10,8")]
+)
+
+(define_insn "*thumb2_movdi"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r, r, r, m")
+ (match_operand:DI 1 "di_operand" "rDa,Db,Dc,mi,r"))]
+ "TARGET_THUMB2
+ && !(TARGET_HARD_FLOAT && (TARGET_MAVERICK || TARGET_VFP))
+ && !TARGET_IWMMXT"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 2:
+ return \"#\";
+ default:
+ return output_move_double (operands);
+ }
+ "
+ [(set_attr "length" "8,12,16,8,8")
+ (set_attr "type" "*,*,*,load2,store2")
+ (set_attr "pool_range" "*,*,*,4096,*")
+ (set_attr "neg_pool_range" "*,*,*,0,*")]
+)
+
+(define_insn "*thumb2_movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r, m")
+ (match_operand:SI 1 "general_operand" "rI,K,N,mi,r"))]
+ "TARGET_THUMB2 && ! TARGET_IWMMXT
+ && !(TARGET_HARD_FLOAT && TARGET_VFP)
+ && ( register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ movw%?\\t%0, %1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0"
+ [(set_attr "type" "*,*,*,load1,store1")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,*,*,4096,*")
+ (set_attr "neg_pool_range" "*,*,*,0,*")]
+)
+
+;; ??? We can probably do better with thumb2
+(define_insn "pic_load_addr_thumb2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "" "mX")
+ (label_ref (match_operand 2 "" ""))] UNSPEC_PIC_SYM))]
+ "TARGET_THUMB2 && (flag_pic || (TARGET_MACHO && MACHO_DYNAMIC_NO_PIC_P))"
+ "ldr%?\\t%0, %1"
+ [(set_attr "type" "load1")
+ (set_attr "pool_range" "4096")
+ (set_attr "neg_pool_range" "0")]
+)
+
+;; Set reg to the address of this instruction plus four. The low two
+;; bits of the PC are always read as zero, so ensure the instruction is
+;; word aligned.
+(define_insn "pic_load_dot_plus_four"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const (plus:SI (pc) (const_int 4)))]
+ UNSPEC_PIC_BASE))
+ (use (match_operand 1 "" ""))]
+ "TARGET_THUMB2"
+ "*
+ assemble_align(BITS_PER_WORD);
+ (*targetm.asm_out.internal_label) (asm_out_file, \"LPIC\",
+ INTVAL (operands[1]));
+ /* We use adr because some buggy versions of gas assemble add r8, pc, #0
+ as add.w r8, pc, #0 rather than addw r8, pc, #0. */
+ asm_fprintf (asm_out_file, \"\\tadr\\t%r, %LLPIC%d + 4\\n\",
+ REGNO(operands[0]), (int)INTVAL (operands[1]));
+ return \"\";
+ "
+ [(set_attr "length" "6")]
+)
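+
+;; For illustration (hypothetical register and label number), the pattern
+;; above emits something like:
+;;         .align 2
+;; LPIC5:
+;;         adr     r3, LPIC5 + 4
+;; i.e. the destination register receives the word-aligned address of the
+;; adr instruction plus four.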
+
+;; Thumb-2 always has load/store halfword instructions, so we can avoid a lot
+;; of the messiness associated with the ARM patterns.
+(define_insn "*thumb2_movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rI,n,r,m"))]
+ "TARGET_THUMB2"
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ movw%?\\t%0, %L1\\t%@ movhi
+ str%(h%)\\t%1, %0\\t%@ movhi
+ ldr%(h%)\\t%0, %1\\t%@ movhi"
+ [(set_attr "type" "*,*,store1,load1")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,*,*,4096")
+ (set_attr "neg_pool_range" "*,*,*,250")]
+)
+
+(define_insn "*thumb2_movsf_soft_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,mE,r"))]
+ "TARGET_THUMB2
+ && TARGET_SOFT_FLOAT
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], SFmode))"
+ "@
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "*,load1,store1")
+ (set_attr "pool_range" "*,4096,*")
+ (set_attr "neg_pool_range" "*,0,*")]
+)
+
+(define_insn "*thumb2_movdf_soft_insn"
+ [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=r,r,r,r,m")
+ (match_operand:DF 1 "soft_df_operand" "rDa,Db,Dc,mF,r"))]
+ "TARGET_THUMB2 && TARGET_SOFT_FLOAT
+ && ( register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ case 2:
+ return \"#\";
+ default:
+ return output_move_double (operands);
+ }
+ "
+ [(set_attr "length" "8,12,16,8,8")
+ (set_attr "type" "*,*,*,load2,store2")
+ (set_attr "pool_range" "1020")
+ (set_attr "neg_pool_range" "0")]
+)
+
+(define_insn "*thumb2_cmpsi_shiftsi"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")])))]
+ "TARGET_THUMB2"
+ "cmp%?\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_cmpsi_shiftsi_swp"
+ [(set (reg:CC_SWP CC_REGNUM)
+ (compare:CC_SWP (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")])
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ "TARGET_THUMB2"
+ "cmp%?\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_cmpsi_neg_shiftsi"
+ [(set (reg:CC CC_REGNUM)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (neg:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "M")]))))]
+ "TARGET_THUMB2"
+ "cmn%?\\t%0, %1%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_mov_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ "TARGET_THUMB2"
+ "ite\\t%D1\;mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "10")]
+)
+
+(define_insn "*thumb2_mov_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ "TARGET_THUMB2"
+ "ite\\t%D1\;mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
+ [(set_attr "conds" "use")
+ (set_attr "length" "10")]
+)
+
+(define_insn "*thumb2_mov_notscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ "TARGET_THUMB2"
+ "ite\\t%D1\;mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "10")]
+)
+
+(define_insn "*thumb2_movsicc_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,0,rI,K,rI,rI,K,K")
+ (match_operand:SI 2 "arm_not_operand" "rI,K,0,0,rI,K,rI,K")))]
+ "TARGET_THUMB2"
+ "@
+ it\\t%D3\;mov%D3\\t%0, %2
+ it\\t%D3\;mvn%D3\\t%0, #%B2
+ it\\t%d3\;mov%d3\\t%0, %1
+ it\\t%d3\;mvn%d3\\t%0, #%B1
+ ite\\t%d3\;mov%d3\\t%0, %1\;mov%D3\\t%0, %2
+ ite\\t%d3\;mov%d3\\t%0, %1\;mvn%D3\\t%0, #%B2
+ ite\\t%d3\;mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
+ ite\\t%d3\;mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
+ [(set_attr "length" "6,6,6,6,10,10,10,10")
+ (set_attr "conds" "use")]
+)
+
+(define_insn "*thumb2_movsfcc_soft_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=r,r")
+ (if_then_else:SF (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,r")
+ (match_operand:SF 2 "s_register_operand" "r,0")))]
+ "TARGET_THUMB2 && TARGET_SOFT_FLOAT"
+ "@
+ it\\t%D3\;mov%D3\\t%0, %2
+ it\\t%d3\;mov%d3\\t%0, %1"
+ [(set_attr "length" "6,6")
+ (set_attr "conds" "use")]
+)
+
+(define_insn "*call_reg_thumb2"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_THUMB2"
+ "blx%?\\t%0"
+ [(set_attr "type" "call")]
+)
+
+(define_insn "*call_value_reg_thumb2"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNUM))]
+ "TARGET_THUMB2"
+ "blx\\t%1"
+ [(set_attr "type" "call")]
+)
+
+(define_insn "*thumb2_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "register_operand" "l*r"))]
+ "TARGET_THUMB2"
+ "bx\\t%0"
+ [(set_attr "conds" "clob")]
+)
+;; Don't define thumb2_load_indirect_jump because we can't guarantee label
+;; addresses will have the thumb bit set correctly.
+
+
+;; Patterns to allow combination of arithmetic, cond code and shifts
+
+(define_insn "*thumb2_arith_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "const_int_operand" "M")])
+ (match_operand:SI 2 "s_register_operand" "r")]))]
+ "TARGET_THUMB2"
+ "%i1%?\\t%0, %2, %4%S3"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "4")
+ (set_attr "type" "alu_shift")]
+)
+
+;; ??? What does this splitter do? Copied from the ARM version
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 2 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "")
+ (match_operand:SI 5 "const_int_operand" "")])
+ (match_operand:SI 6 "s_register_operand" "")])
+ (match_operand:SI 7 "arm_rhs_operand" "")]))
+ (clobber (match_operand:SI 8 "s_register_operand" ""))]
+ "TARGET_32BIT"
+ [(set (match_dup 8)
+ (match_op_dup 2 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 6)]))
+ (set (match_dup 0)
+ (match_op_dup 1 [(match_dup 8) (match_dup 7)]))]
+ "")
+
+(define_insn "*thumb2_arith_shiftsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "const_int_operand" "M")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 2)]))]
+ "TARGET_32BIT"
+ "%i1%.\\t%0, %2, %4%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "4")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_arith_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "const_int_operand" "M")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_THUMB2"
+ "%i1%.\\t%0, %2, %4%S3"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "4")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_sub_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "M")])))]
+ "TARGET_THUMB2"
+ "sub%?\\t%0, %1, %3%S2"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "3")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_sub_shiftsi_compare0"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "M")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "TARGET_THUMB2"
+ "sub%.\\t%0, %1, %3%S2"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "3")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_sub_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV CC_REGNUM)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "M")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ "TARGET_THUMB2"
+ "sub%.\\t%0, %1, %3%S2"
+ [(set_attr "conds" "set")
+ (set_attr "shift" "3")
+ (set_attr "type" "alu_shift")]
+)
+
+(define_insn "*thumb2_and_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ "TARGET_THUMB2"
+ "ite\\t%D1\;mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "10")]
+)
+
+(define_insn "*thumb2_ior_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operator:SI 2 "arm_comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ "TARGET_THUMB2"
+ "@
+ it\\t%d2\;orr%d2\\t%0, %1, #1
+ ite\\t%D2\;mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,10")]
+)
+
+(define_insn "*thumb2_compare_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 1 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (operands[3] == const0_rtx)
+ {
+ if (GET_CODE (operands[1]) == LT)
+ return \"lsr\\t%0, %2, #31\";
+
+ if (GET_CODE (operands[1]) == GE)
+ return \"mvn\\t%0, %2\;lsr\\t%0, %0, #31\";
+
+ if (GET_CODE (operands[1]) == EQ)
+ return \"rsbs\\t%0, %2, #1\;it\\tcc\;movcc\\t%0, #0\";
+ }
+
+ if (GET_CODE (operands[1]) == NE)
+ {
+ if (which_alternative == 1)
+ return \"adds\\t%0, %2, #%n3\;it\\tne\;movne\\t%0, #1\";
+ return \"subs\\t%0, %2, %3\;it\\tne\;movne\\t%0, #1\";
+ }
+ if (which_alternative == 1)
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ return \"ite\\t%D1\;mov%D1\\t%0, #0\;mov%d1\\t%0, #1\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "14")]
+)
+
+(define_insn "*thumb2_cond_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 3 "equality_operator"
+ [(match_operator 4 "arm_comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))]
+ "TARGET_THUMB2"
+ "*
+ if (GET_CODE (operands[3]) == NE)
+ {
+ if (which_alternative != 1)
+ output_asm_insn (\"it\\t%D4\;mov%D4\\t%0, %2\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"it\\t%d4\;mov%d4\\t%0, %1\", operands);
+ return \"\";
+ }
+ switch (which_alternative)
+ {
+ case 0:
+ output_asm_insn (\"it\\t%d4\", operands);
+ break;
+ case 1:
+ output_asm_insn (\"it\\t%D4\", operands);
+ break;
+ case 2:
+ output_asm_insn (\"ite\\t%D4\", operands);
+ break;
+ default:
+ abort();
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%d4\\t%0, %2\", operands);
+ return \"\";
+ "
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,6,10")]
+)
+
+(define_insn "*thumb2_cond_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operator:SI 4 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
+ return \"%i5\\t%0, %1, %2, lsr #31\";
+
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (GET_CODE (operands[5]) == AND)
+ {
+ output_asm_insn (\"ite\\t%D4\", operands);
+ output_asm_insn (\"mov%D4\\t%0, #0\", operands);
+ }
+ else if (GET_CODE (operands[5]) == MINUS)
+ {
+ output_asm_insn (\"ite\\t%D4\", operands);
+ output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands);
+ }
+ else if (which_alternative != 0)
+ {
+ output_asm_insn (\"ite\\t%D4\", operands);
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ }
+ else
+ output_asm_insn (\"it\\t%d4\", operands);
+ return \"%i5%d4\\t%0, %1, #1\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "14")]
+)
+
+(define_insn "*thumb2_cond_sub"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 4 "arm_comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (which_alternative != 0)
+ {
+ output_asm_insn (\"ite\\t%D4\", operands);
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ }
+ else
+ output_asm_insn (\"it\\t%d4\", operands);
+ return \"sub%d4\\t%0, %1, #1\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,14")]
+)
+
+(define_insn "*thumb2_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator 3 "arm_comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx)
+ return \"asr\\t%0, %1, #31\";
+
+ if (GET_CODE (operands[3]) == NE)
+ return \"subs\\t%0, %1, %2\;it\\tne\;mvnne\\t%0, #0\";
+
+ if (GET_CODE (operands[3]) == GT)
+ return \"subs\\t%0, %1, %2\;it\\tne\;mvnne\\t%0, %0, asr #31\";
+
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"ite\\t%D3\", operands);
+ output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+ return \"mvn%d3\\t%0, #0\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "14")]
+)
+
+(define_insn "*thumb2_movcond"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "arm_comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (GET_CODE (operands[5]) == LT
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"and\\t%0, %1, %3, asr #31\";
+ return \"ands\\t%0, %1, %3, asr #32\;it\\tcc\;movcc\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"bic\\t%0, %2, %3, asr #31\";
+ return \"bics\\t%0, %2, %3, asr #32\;it\\tcs\;movcs\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants. */
+ }
+
+ if (GET_CODE (operands[5]) == GE
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"bic\\t%0, %1, %3, asr #31\";
+ return \"bics\\t%0, %1, %3, asr #32\;it\\tcs\;movcs\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"and\\t%0, %2, %3, asr #31\";
+ return \"ands\\t%0, %2, %3, asr #32\;it\tcc\;movcc\\t%0, %1\";
+ }
+ /* The only case that falls through to here is when both ops 1 & 2
+ are constants. */
+ }
+ if (GET_CODE (operands[4]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[4])))
+ output_asm_insn (\"cmn\\t%3, #%n4\", operands);
+ else
+ output_asm_insn (\"cmp\\t%3, %4\", operands);
+ switch (which_alternative)
+ {
+ case 0:
+ output_asm_insn (\"it\\t%D5\", operands);
+ break;
+ case 1:
+ output_asm_insn (\"it\\t%d5\", operands);
+ break;
+ case 2:
+ output_asm_insn (\"ite\\t%d5\", operands);
+ break;
+ default:
+ abort();
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d5\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D5\\t%0, %2\", operands);
+ return \"\";
+ "
+ [(set_attr "conds" "clob")
+ (set_attr "length" "10,10,14")]
+)
+
+;; Zero and sign extension instructions.
+
+(define_insn "*thumb2_zero_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_THUMB2"
+ "*
+ /* ??? Output both instructions unconditionally, otherwise the conditional
+ execution insn counter gets confused.
+ if (REGNO (operands[1])
+ != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0)) */
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, #0\";
+ "
+ [(set_attr "length" "8")
+ (set_attr "ce_count" "2")
+ (set_attr "predicable" "yes")]
+)
+
+(define_insn "*thumb2_zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_THUMB2"
+ "@
+ and%?\\t%Q0, %1, #255\;mov%?\\t%R0, #0
+ ldr%(b%)\\t%Q0, %1\;mov%?\\t%R0, #0"
+ [(set_attr "length" "8")
+ (set_attr "ce_count" "2")
+ (set_attr "predicable" "yes")
+ (set_attr "type" "*,load_byte")
+ (set_attr "pool_range" "*,4092")
+ (set_attr "neg_pool_range" "*,250")]
+)
+
+(define_insn "*thumb2_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_THUMB2"
+ "*
+ /* ??? Output both instructions unconditionally, otherwise the conditional
+ execution insn counter gets confused.
+ if (REGNO (operands[1])
+ != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0)) */
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"asr%?\\t%R0, %Q0, #31\";
+ "
+ [(set_attr "length" "8")
+ (set_attr "ce_count" "2")
+ (set_attr "shift" "1")
+ (set_attr "predicable" "yes")]
+)
+
+;; All supported Thumb2 implementations are armv6, so only that case is
+;; provided.
+(define_insn "*thumb2_extendqisi_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_THUMB2 && arm_arch6"
+ "@
+ sxtb%?\\t%0, %1
+ ldr%(sb%)\\t%0, %1"
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,4096")
+ (set_attr "neg_pool_range" "*,250")]
+)
+
+(define_insn "*thumb2_zero_extendhisi2_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_THUMB2 && arm_arch6"
+ "@
+ uxth%?\\t%0, %1
+ ldr%(h%)\\t%0, %1"
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,4096")
+ (set_attr "neg_pool_range" "*,250")]
+)
+
+(define_insn "*thumb2_zero_extendqisi2_v6"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_THUMB2 && arm_arch6"
+ "@
+ uxtb%(%)\\t%0, %1
+ ldr%(b%)\\t%0, %1\\t%@ zero_extendqisi2"
+ [(set_attr "type" "alu_shift,load_byte")
+ (set_attr "predicable" "yes")
+ (set_attr "pool_range" "*,4096")
+ (set_attr "neg_pool_range" "*,250")]
+)
+
+;; APPLE LOCAL begin 6152801 SImode thumb2 switch table dispatch
+(define_insn "thumb2_casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (clobber (reg:CC CC_REGNUM))
+ (clobber (match_scratch:SI 4 "=&r"))
+ (use (label_ref (match_dup 2)))])]
+ "TARGET_THUMB2"
+ "* return thumb2_output_casesi(operands);"
+ [(set_attr "conds" "clob")
+ (set_attr "length" "16")]
+)
+
+;; Removed thumb2_casesi_internal_pic
+;; APPLE LOCAL end 6152801 SImode thumb2 switch table dispatch
+
+(define_insn_and_split "thumb2_eh_return"
+ [(unspec_volatile [(match_operand:SI 0 "s_register_operand" "r")]
+ VUNSPEC_EH_RETURN)
+ (clobber (match_scratch:SI 1 "=&r"))]
+ "TARGET_THUMB2"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ "
+ {
+ thumb_set_return_address (operands[0], operands[1]);
+ DONE;
+ }"
+)
+
+;; Peepholes and insns for 16-bit flag clobbering instructions.
+;; The conditional forms of these instructions do not clobber CC.
+;; However, by the time peepholes are run it is probably too late to do
+;; anything useful with this information.
+(define_peephole2
+ [(set (match_operand:SI 0 "low_register_operand" "")
+ (match_operator:SI 3 "thumb_16bit_operator"
+ [(match_operand:SI 1 "low_register_operand" "")
+ (match_operand:SI 2 "low_register_operand" "")]))]
+ "TARGET_THUMB2 && rtx_equal_p(operands[0], operands[1])
+ && peep2_regno_dead_p(0, CC_REGNUM)"
+ [(parallel
+ [(set (match_dup 0)
+ (match_op_dup 3
+ [(match_dup 1)
+ (match_dup 2)]))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+)
+
+(define_insn "*thumb2_alusi3_short"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (match_operator:SI 3 "thumb_16bit_operator"
+ [(match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2 && reload_completed"
+ "%I3%!\\t%0, %1, %2"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "2")]
+)
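+
+;; Illustrative example (hypothetical registers): when the condition flags
+;; are dead, the peephole above lets
+;;         add     r0, r0, r1      @ 32-bit Thumb-2 encoding, flags untouched
+;; be emitted instead as
+;;         adds    r0, r0, r1      @ 16-bit encoding, clobbers the flags
+;; trading a harmless flag clobber for a two-byte encoding.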
+
+;; Similarly for 16-bit shift instructions
+;; There is no 16-bit rotate by immediate instruction.
+(define_peephole2
+ [(set (match_operand:SI 0 "low_register_operand" "")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "low_register_operand" "")
+ (match_operand:SI 2 "low_reg_or_int_operand" "")]))]
+ "TARGET_THUMB2
+ && peep2_regno_dead_p(0, CC_REGNUM)
+ && ((GET_CODE(operands[3]) != ROTATE && GET_CODE(operands[3]) != ROTATERT)
+ || REG_P(operands[2]))"
+ [(parallel
+ [(set (match_dup 0)
+ (match_op_dup 3
+ [(match_dup 1)
+ (match_dup 2)]))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+)
+
+(define_insn "*thumb2_shiftsi3_short"
+ [(set (match_operand:SI 0 "low_register_operand" "=l")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "low_register_operand" "l")
+ (match_operand:SI 2 "low_reg_or_int_operand" "lM")]))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2 && reload_completed
+ && ((GET_CODE(operands[3]) != ROTATE && GET_CODE(operands[3]) != ROTATERT)
+ || REG_P(operands[2]))"
+ "* return arm_output_shift(operands, 2);"
+ [(set_attr "predicable" "yes")
+ (set_attr "shift" "1")
+ (set_attr "length" "2")
+ (set (attr "type") (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "alu_shift")
+ (const_string "alu_shift_reg")))]
+)
+
+;; 16-bit load immediate
+(define_peephole2
+ [(set (match_operand:SI 0 "low_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "TARGET_THUMB2
+ && peep2_regno_dead_p(0, CC_REGNUM)
+ && (unsigned HOST_WIDE_INT) INTVAL(operands[1]) < 256"
+ [(parallel
+ [(set (match_dup 0)
+ (match_dup 1))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+)
+
+(define_insn "*thumb2_movsi_shortim"
+ [(set (match_operand:SI 0 "low_register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "I"))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2 && reload_completed"
+ "mov%!\t%0, %1"
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "2")]
+)
+
+;; 16-bit add/sub immediate
+(define_peephole2
+ [(set (match_operand:SI 0 "low_register_operand" "")
+ (plus:SI (match_operand:SI 1 "low_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "TARGET_THUMB2
+ && peep2_regno_dead_p(0, CC_REGNUM)
+ && ((rtx_equal_p(operands[0], operands[1])
+ && INTVAL(operands[2]) > -256 && INTVAL(operands[2]) < 256)
+ || (INTVAL(operands[2]) > -8 && INTVAL(operands[2]) < 8))"
+ [(parallel
+ [(set (match_dup 0)
+ (plus:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:CC CC_REGNUM))])]
+ ""
+)
+
+(define_insn "*thumb2_addsi_shortim"
+ [(set (match_operand:SI 0 "low_register_operand" "=l")
+ (plus:SI (match_operand:SI 1 "low_register_operand" "l")
+ (match_operand:SI 2 "const_int_operand" "IL")))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2 && reload_completed"
+ "*
+ HOST_WIDE_INT val;
+
+ val = INTVAL(operands[2]);
+ /* We prefer e.g. subs rn, rn, #1 over adds rn, rn, #0xffffffff. */
+ if (val < 0 && const_ok_for_arm(ARM_SIGN_EXTEND (-val)))
+ return \"sub%!\\t%0, %1, #%n2\";
+ else
+ return \"add%!\\t%0, %1, %2\";
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "length" "2")]
+)
+
+;; APPLE LOCAL begin 6261739 incorrect insn lengths
+(define_insn "*thumb2_cbz"
+ [(set (pc) (if_then_else
+ (eq (match_operand:SI 0 "s_register_operand" "l,?r")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (get_attr_length (insn) == 2 && which_alternative == 0)
+ return \"cbz\\t%0, %l1\";
+ else
+ return \"cmp\\t%0, #0\;beq\\t%l1\";
+ "
+ [(set (attr "length")
+ (if_then_else
+ (and (and (ge (minus (match_dup 1) (pc)) (const_int 2))
+ (le (minus (match_dup 1) (pc)) (const_int 128)))
+ (match_operand:SI 0 "low_register_operand" ""))
+ (const_int 2)
+ (const_int 8)))]
+)
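+
+;; For example (illustrative): a forward branch of roughly 100 bytes that
+;; tests a low register satisfies 2 <= (%l1 - pc) <= 128, so the two-byte
+;; cbz form above is chosen; otherwise the eight-byte "cmp ; beq" sequence
+;; is used.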
+
+(define_insn "*thumb2_cbnz"
+ [(set (pc) (if_then_else
+ (ne (match_operand:SI 0 "s_register_operand" "l,?r")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2"
+ "*
+ if (get_attr_length (insn) == 2 && which_alternative == 0)
+ return \"cbnz\\t%0, %l1\";
+ else
+ return \"cmp\\t%0, #0\;bne\\t%l1\";
+ "
+ [(set (attr "length")
+ (if_then_else
+ (and (and (ge (minus (match_dup 1) (pc)) (const_int 2))
+ (le (minus (match_dup 1) (pc)) (const_int 128)))
+ (match_operand:SI 0 "low_register_operand" ""))
+ (const_int 2)
+ (const_int 8)))]
+)
+;; APPLE LOCAL end 6261739 incorrect insn lengths
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/unwind-arm.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/unwind-arm.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/unwind-arm.c (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/unwind-arm.c Wed Jul 22 15:36:27 2009
@@ -41,6 +41,8 @@
bool __attribute__((weak)) __cxa_begin_cleanup(_Unwind_Control_Block *ucbp);
bool __attribute__((weak)) __cxa_type_match(_Unwind_Control_Block *ucbp,
const type_info *rttip,
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ bool is_reference,
void **matched_object);
_Unwind_Ptr __attribute__((weak))
@@ -73,6 +75,15 @@
_uw pad;
};
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+struct vfpv3_regs
+{
+ /* Always populated via VSTM, so no need for the "pad" field from
+ vfp_regs (which is used to store the format word for FSTMX). */
+ _uw64 d[16];
+};
+
+/* APPLE LOCAL end v7 support. Merge from mainline */
struct fpa_reg
{
_uw w[3];
@@ -83,6 +94,18 @@
struct fpa_reg f[8];
};
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+struct wmmxd_regs
+{
+ _uw64 wd[16];
+};
+
+struct wmmxc_regs
+{
+ _uw wc[4];
+};
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
/* Unwind descriptors. */
typedef struct
@@ -113,10 +136,27 @@
struct core_regs core;
_uw prev_sp; /* Only valid during forced unwinding. */
struct vfp_regs vfp;
+ /* APPLE LOCAL v7 support. Merge from mainline */
+ struct vfpv3_regs vfp_regs_16_to_31;
struct fpa_regs fpa;
+ /* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ struct wmmxd_regs wmmxd;
+ struct wmmxc_regs wmmxc;
+ /* APPLE LOCAL end v7 support. Merge from Codesourcery */
} phase1_vrs;
-#define DEMAND_SAVE_VFP 1
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+#define DEMAND_SAVE_VFP 1 /* VFP state has been saved if not set */
+#define DEMAND_SAVE_VFP_D 2 /* VFP state is for FLDMD/FSTMD if set */
+#define DEMAND_SAVE_VFP_V3 4 /* VFPv3 state for regs 16 .. 31 has
+ been saved if not set */
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+#define DEMAND_SAVE_WMMXD 8 /* iWMMXt data registers have been
+ saved if not set. */
+#define DEMAND_SAVE_WMMXC 16 /* iWMMXt control registers have been
+ saved if not set. */
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
/* This must match the structure created by the assembly wrappers. */
typedef struct
@@ -142,15 +182,51 @@
/* Coprocessor register state manipulation functions. */
+/* APPLE LOCAL v7 support. Merge from mainline */
+/* Routines for FLDMX/FSTMX format... */
void __gnu_Unwind_Save_VFP (struct vfp_regs * p);
void __gnu_Unwind_Restore_VFP (struct vfp_regs * p);
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+void __gnu_Unwind_Save_WMMXD (struct wmmxd_regs * p);
+void __gnu_Unwind_Restore_WMMXD (struct wmmxd_regs * p);
+void __gnu_Unwind_Save_WMMXC (struct wmmxc_regs * p);
+void __gnu_Unwind_Restore_WMMXC (struct wmmxc_regs * p);
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
+
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+/* ...and those for FLDMD/FSTMD format... */
+void __gnu_Unwind_Save_VFP_D (struct vfp_regs * p);
+void __gnu_Unwind_Restore_VFP_D (struct vfp_regs * p);
+
+/* ...and those for VLDM/VSTM format, saving/restoring only registers
+ 16 through 31. */
+void __gnu_Unwind_Save_VFP_D_16_to_31 (struct vfpv3_regs * p);
+void __gnu_Unwind_Restore_VFP_D_16_to_31 (struct vfpv3_regs * p);
+/* APPLE LOCAL end v7 support. Merge from mainline */
/* Restore coprocessor state after phase1 unwinding. */
static void
restore_non_core_regs (phase1_vrs * vrs)
{
+/* APPLE LOCAL begin v7 support. Merge from mainline */
if ((vrs->demand_save_flags & DEMAND_SAVE_VFP) == 0)
- __gnu_Unwind_Restore_VFP (&vrs->vfp);
+ {
+ if (vrs->demand_save_flags & DEMAND_SAVE_VFP_D)
+ __gnu_Unwind_Restore_VFP_D (&vrs->vfp);
+ else
+ __gnu_Unwind_Restore_VFP (&vrs->vfp);
+ }
+
+ if ((vrs->demand_save_flags & DEMAND_SAVE_VFP_V3) == 0)
+ __gnu_Unwind_Restore_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31);
+/* APPLE LOCAL end v7 support. Merge from mainline */
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+
+ if ((vrs->demand_save_flags & DEMAND_SAVE_WMMXD) == 0)
+ __gnu_Unwind_Restore_WMMXD (&vrs->wmmxd);
+ if ((vrs->demand_save_flags & DEMAND_SAVE_WMMXC) == 0)
+ __gnu_Unwind_Restore_WMMXC (&vrs->wmmxc);
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
}
/* A better way to do this would probably be to compare the absolute address
@@ -273,35 +349,102 @@
_uw start = discriminator >> 16;
_uw count = discriminator & 0xffff;
struct vfp_regs tmp;
+/* APPLE LOCAL begin v7 support. Merge from mainline */
+ struct vfpv3_regs tmp_16_to_31;
+ int tmp_count;
_uw *sp;
_uw *dest;
+ int num_vfpv3_regs = 0;
+ /* We use an approximation here by always bounding _UVRSD_DOUBLE
+ register numbers at 32, since we can't detect whether VFPv3
+ is absent (in which case the upper limit is 16). */
if ((representation != _UVRSD_VFPX && representation != _UVRSD_DOUBLE)
- || start + count > 16)
+ || start + count > (representation == _UVRSD_VFPX ? 16 : 32)
+ || (representation == _UVRSD_VFPX && start >= 16))
return _UVRSR_FAILED;
- if (vrs->demand_save_flags & DEMAND_SAVE_VFP)
+ /* Check if we're being asked to pop VFPv3-only registers
+ (numbers 16 through 31). */
+ if (start >= 16)
+ num_vfpv3_regs = count;
+ else if (start + count > 16)
+ num_vfpv3_regs = start + count - 16;
+
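+ /* Worked example (illustrative, not from the original source): popping
+ start = 14, count = 4 covers d14-d17, so num_vfpv3_regs = 2 (d16 and
+ d17) and the two remaining registers (d14 and d15) go through the
+ plain VFP save/restore path below. */
+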
+ if (num_vfpv3_regs && representation != _UVRSD_DOUBLE)
+ return _UVRSR_FAILED;
+
+ /* Demand-save coprocessor registers for stage1. */
+ if (start < 16 && (vrs->demand_save_flags & DEMAND_SAVE_VFP))
{
- /* Demand-save resisters for stage1. */
vrs->demand_save_flags &= ~DEMAND_SAVE_VFP;
- __gnu_Unwind_Save_VFP (&vrs->vfp);
+
+ if (representation == _UVRSD_DOUBLE)
+ {
+ /* Save in FLDMD/FSTMD format. */
+ vrs->demand_save_flags |= DEMAND_SAVE_VFP_D;
+ __gnu_Unwind_Save_VFP_D (&vrs->vfp);
+ }
+ else
+ {
+ /* Save in FLDMX/FSTMX format. */
+ vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_D;
+ __gnu_Unwind_Save_VFP (&vrs->vfp);
+ }
+ }
+
+ if (num_vfpv3_regs > 0
+ && (vrs->demand_save_flags & DEMAND_SAVE_VFP_V3))
+ {
+ vrs->demand_save_flags &= ~DEMAND_SAVE_VFP_V3;
+ __gnu_Unwind_Save_VFP_D_16_to_31 (&vrs->vfp_regs_16_to_31);
}
/* Restore the registers from the stack. Do this by saving the
current VFP registers to a memory area, moving the in-memory
values into that area, and restoring from the whole area.
For _UVRSD_VFPX we assume FSTMX standard format 1. */
- __gnu_Unwind_Save_VFP (&tmp);
+ if (representation == _UVRSD_VFPX)
+ __gnu_Unwind_Save_VFP (&tmp);
+ else
+ {
+ /* Save registers 0 .. 15 if required. */
+ if (start < 16)
+ __gnu_Unwind_Save_VFP_D (&tmp);
+
+ /* Save VFPv3 registers 16 .. 31 if required. */
+ if (num_vfpv3_regs)
+ __gnu_Unwind_Save_VFP_D_16_to_31 (&tmp_16_to_31);
+ }
- /* The stack address is only guaranteed to be word aligned, so
+ /* Work out how many registers below register 16 need popping. */
+ tmp_count = num_vfpv3_regs > 0 ? 16 - start : count;
+
+ /* Copy registers below 16, if needed.
+ The stack address is only guaranteed to be word aligned, so
we can't use doubleword copies. */
sp = (_uw *) vrs->core.r[R_SP];
- dest = (_uw *) &tmp.d[start];
- count *= 2;
- while (count--)
- *(dest++) = *(sp++);
+ if (tmp_count > 0)
+ {
+ tmp_count *= 2;
+ dest = (_uw *) &tmp.d[start];
+ while (tmp_count--)
+ *(dest++) = *(sp++);
+ }
+
+ /* Copy VFPv3 registers numbered >= 16, if needed. */
+ if (num_vfpv3_regs > 0)
+ {
+ /* num_vfpv3_regs is needed below, so copy it. */
+ int tmp_count_2 = num_vfpv3_regs * 2;
+ int vfpv3_start = start < 16 ? 16 : start;
+
+ dest = (_uw *) &tmp_16_to_31.d[vfpv3_start - 16];
+ while (tmp_count_2--)
+ *(dest++) = *(sp++);
+ }
- /* Skip the pad word */
+ /* Skip the format word space if using FLDMX/FSTMX format. */
if (representation == _UVRSD_VFPX)
sp++;
@@ -309,15 +452,100 @@
vrs->core.r[R_SP] = (_uw) sp;
/* Reload the registers. */
- __gnu_Unwind_Restore_VFP (&tmp);
+ if (representation == _UVRSD_VFPX)
+ __gnu_Unwind_Restore_VFP (&tmp);
+ else
+ {
+ /* Restore registers 0 .. 15 if required. */
+ if (start < 16)
+ __gnu_Unwind_Restore_VFP_D (&tmp);
+
+ /* Restore VFPv3 registers 16 .. 31 if required. */
+ if (num_vfpv3_regs > 0)
+ __gnu_Unwind_Restore_VFP_D_16_to_31 (&tmp_16_to_31);
+ }
+/* APPLE LOCAL end v7 support. Merge from mainline */
}
return _UVRSR_OK;
case _UVRSC_FPA:
+/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
+ return _UVRSR_NOT_IMPLEMENTED;
+
case _UVRSC_WMMXD:
+ {
+ _uw start = discriminator >> 16;
+ _uw count = discriminator & 0xffff;
+ struct wmmxd_regs tmp;
+ _uw *sp;
+ _uw *dest;
+
+ if ((representation != _UVRSD_UINT64) || start + count > 16)
+ return _UVRSR_FAILED;
+
+ if (vrs->demand_save_flags & DEMAND_SAVE_WMMXD)
+ {
+ /* Demand-save registers for stage1. */
+ vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXD;
+ __gnu_Unwind_Save_WMMXD (&vrs->wmmxd);
+ }
+
+ /* Restore the registers from the stack. Do this by saving the
+ current WMMXD registers to a memory area, moving the in-memory
+ values into that area, and restoring from the whole area. */
+ __gnu_Unwind_Save_WMMXD (&tmp);
+
+ /* The stack address is only guaranteed to be word aligned, so
+ we can't use doubleword copies. */
+ sp = (_uw *) vrs->core.r[R_SP];
+ dest = (_uw *) &tmp.wd[start];
+ count *= 2;
+ while (count--)
+ *(dest++) = *(sp++);
+
+ /* Set the new stack pointer. */
+ vrs->core.r[R_SP] = (_uw) sp;
+
+ /* Reload the registers. */
+ __gnu_Unwind_Restore_WMMXD (&tmp);
+ }
+ return _UVRSR_OK;
+
case _UVRSC_WMMXC:
- return _UVRSR_NOT_IMPLEMENTED;
+ {
+ int i;
+ struct wmmxc_regs tmp;
+ _uw *sp;
+ if ((representation != _UVRSD_UINT32) || discriminator > 16)
+ return _UVRSR_FAILED;
+
+ if (vrs->demand_save_flags & DEMAND_SAVE_WMMXC)
+ {
+ /* Demand-save registers for stage1. */
+ vrs->demand_save_flags &= ~DEMAND_SAVE_WMMXC;
+ __gnu_Unwind_Save_WMMXC (&vrs->wmmxc);
+ }
+
+ /* Restore the registers from the stack. Do this by saving the
+ current WMMXC registers to a memory area, moving the in-memory
+ values into that area, and restoring from the whole area. */
+ __gnu_Unwind_Save_WMMXC (&tmp);
+
+ sp = (_uw *) vrs->core.r[R_SP];
+ for (i = 0; i < 4; i++)
+ if (discriminator & (1 << i))
+ tmp.wc[i] = *(sp++);
+
+ /* Set the new stack pointer. */
+ vrs->core.r[R_SP] = (_uw) sp;
+
+ /* Reload the registers. */
+ __gnu_Unwind_Restore_WMMXC (&tmp);
+ }
+ return _UVRSR_OK;
+
+/* APPLE LOCAL end v7 support. Merge from Codesourcery */
default:
return _UVRSR_FAILED;
}
@@ -847,6 +1075,8 @@
{
/* Check for a barrier. */
_uw rtti;
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ bool is_reference = (data[0] & uint32_highbit) != 0;
void *matched;
/* Check for no-throw areas. */
@@ -860,6 +1090,8 @@
/* Match a catch specification. */
rtti = _Unwind_decode_target2 ((_uw) &data[1]);
if (!__cxa_type_match (ucbp, (type_info *) rtti,
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ is_reference,
&matched))
matched = (void *)0;
}
@@ -907,7 +1139,8 @@
{
matched = (void *)(ucbp + 1);
rtti = _Unwind_decode_target2 ((_uw) &data[i + 1]);
- if (__cxa_type_match (ucbp, (type_info *) rtti,
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+ if (__cxa_type_match (ucbp, (type_info *) rtti, 0,
&matched))
break;
}
@@ -1001,16 +1234,5 @@
{
return __gnu_unwind_pr_common (state, ucbp, context, 2);
}
-
-/* These two should never be used. */
-_Unwind_Ptr
-_Unwind_GetDataRelBase (_Unwind_Context *context __attribute__ ((unused)))
-{
- abort ();
-}
-
-_Unwind_Ptr
-_Unwind_GetTextRelBase (_Unwind_Context *context __attribute__ ((unused)))
-{
- abort ();
-}
+/* APPLE LOCAL v7 support. Merge from Codesourcery */
+/* Removed lines */
Added: llvm-gcc-4.2/trunk/gcc/config/arm/vec-common.md
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/vec-common.md?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/vec-common.md (added)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/vec-common.md Wed Jul 22 15:36:27 2009
@@ -0,0 +1,108 @@
+;; APPLE LOCAL file v7 support. Merge from Codesourcery
+;; Machine Description for shared bits common to IWMMXT and Neon.
+;; Copyright (C) 2006 Free Software Foundation, Inc.
+;; Written by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA.
+
+;; Vector Moves
+
+;; All integer and float modes supported by Neon and IWMMXT.
+(define_mode_macro VALL [V2DI V2SI V4HI V8QI V2SF V4SI V8HI V16QI V4SF])
+
+;; All integer and float modes supported by Neon and IWMMXT, except V2DI.
+(define_mode_macro VALLW [V2SI V4HI V8QI V2SF V4SI V8HI V16QI V4SF])
+
+;; All integer modes supported by Neon and IWMMXT
+(define_mode_macro VINT [V2DI V2SI V4HI V8QI V4SI V8HI V16QI])
+
+;; All integer modes supported by Neon and IWMMXT, except V2DI
+(define_mode_macro VINTW [V2SI V4HI V8QI V4SI V8HI V16QI])
+
+(define_expand "mov<mode>"
+ [(set (match_operand:VALL 0 "nonimmediate_operand" "")
+ (match_operand:VALL 1 "general_operand" ""))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+;; Vector arithmetic. Expanders are blank, then unnamed insns implement
+;; patterns separately for IWMMXT and Neon.
+
+(define_expand "add<mode>3"
+ [(set (match_operand:VALL 0 "s_register_operand" "")
+ (plus:VALL (match_operand:VALL 1 "s_register_operand" "")
+ (match_operand:VALL 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+(define_expand "sub<mode>3"
+ [(set (match_operand:VALL 0 "s_register_operand" "")
+ (minus:VALL (match_operand:VALL 1 "s_register_operand" "")
+ (match_operand:VALL 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+(define_expand "mul<mode>3"
+ [(set (match_operand:VALLW 0 "s_register_operand" "")
+ (mult:VALLW (match_operand:VALLW 1 "s_register_operand" "")
+ (match_operand:VALLW 2 "s_register_operand" "")))]
+ "TARGET_NEON || (<MODE>mode == V4HImode && TARGET_REALLY_IWMMXT)"
+{
+})
+
+(define_expand "smin<mode>3"
+ [(set (match_operand:VALLW 0 "s_register_operand" "")
+ (smin:VALLW (match_operand:VALLW 1 "s_register_operand" "")
+ (match_operand:VALLW 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+(define_expand "umin<mode>3"
+ [(set (match_operand:VINTW 0 "s_register_operand" "")
+ (umin:VINTW (match_operand:VINTW 1 "s_register_operand" "")
+ (match_operand:VINTW 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+(define_expand "smax<mode>3"
+ [(set (match_operand:VALLW 0 "s_register_operand" "")
+ (smax:VALLW (match_operand:VALLW 1 "s_register_operand" "")
+ (match_operand:VALLW 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
+
+(define_expand "umax<mode>3"
+ [(set (match_operand:VINTW 0 "s_register_operand" "")
+ (umax:VINTW (match_operand:VINTW 1 "s_register_operand" "")
+ (match_operand:VINTW 2 "s_register_operand" "")))]
+ "TARGET_NEON
+ || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
+{
+})
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/vfp.md
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/vfp.md?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/vfp.md (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/vfp.md Wed Jul 22 15:36:27 2009
@@ -1,5 +1,6 @@
;; ARM VFP coprocessor Machine Description
-;; Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+;; APPLE LOCAL v7 support. Merge from mainline
+;; Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
;; Written by CodeSourcery, LLC.
;;
;; This file is part of GCC.
@@ -21,48 +22,18 @@
;; Additional register numbers
(define_constants
- [(VFPCC_REGNUM 95)]
+;; APPLE LOCAL v7 support. Merge from mainline
+ [(VFPCC_REGNUM 127)]
)
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;; Pipeline description
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
-(define_automaton "vfp11")
-
-;; There are 3 pipelines in the VFP11 unit.
-;;
-;; - A 8-stage FMAC pipeline (7 execute + writeback) with forward from
-;; fourth stage for simple operations.
-;;
-;; - A 5-stage DS pipeline (4 execute + writeback) for divide/sqrt insns.
-;; These insns also uses first execute stage of FMAC pipeline.
-;;
-;; - A 4-stage LS pipeline (execute + 2 memory + writeback) with forward from
-;; second memory stage for loads.
-
-;; We do not model Write-After-Read hazards.
-;; We do not do write scheduling with the arm core, so it is only necessary
-;; to model the first stage of each pipeline
-;; ??? Need to model LS pipeline properly for load/store multiple?
-;; We do not model fmstat properly. This could be done by modeling pipelines
-;; properly and defining an absence set between a dummy fmstat unit and all
-;; other vfp units.
-
-(define_cpu_unit "fmac" "vfp11")
-
-(define_cpu_unit "ds" "vfp11")
-
-(define_cpu_unit "vfp_ls" "vfp11")
-
-(define_cpu_unit "fmstat" "vfp11")
-
-(exclusion_set "fmac,ds" "fmstat")
+;; APPLE LOCAL begin v7 support. Merge from Codesourcery
;; The VFP "type" attributes differ from those used in the FPA model.
;; ffarith Fast floating point insns, e.g. abs, neg, cpy, cmp.
;; farith Most arithmetic insns.
-;; fmul Double precision multiply.
+;; fmuls Single precision multiply.
+;; fmuld Double precision multiply.
+;; fmacs Single precision multiply-accumulate.
+;; fmacd Double precision multiply-accumulate.
;; fdivs Single precision sqrt or division.
;; fdivd Double precision sqrt or division.
;; f_flag fmstat operation
@@ -72,74 +43,87 @@
;; r_2_f Transfer arm to vfp reg.
;; f_cvt Convert floating<->integral
-(define_insn_reservation "vfp_ffarith" 4
- (and (eq_attr "generic_vfp" "yes")
- (eq_attr "type" "ffarith"))
- "fmac")
-
-(define_insn_reservation "vfp_farith" 8
- (and (eq_attr "generic_vfp" "yes")
- (eq_attr "type" "farith,f_cvt"))
- "fmac")
-
-(define_insn_reservation "vfp_fmul" 9
- (and (eq_attr "generic_vfp" "yes")
- (eq_attr "type" "fmul"))
- "fmac*2")
-
-(define_insn_reservation "vfp_fdivs" 19
- (and (eq_attr "generic_vfp" "yes")
- (eq_attr "type" "fdivs"))
- "ds*15")
-
-(define_insn_reservation "vfp_fdivd" 33
- (and (eq_attr "generic_vfp" "yes")
- (eq_attr "type" "fdivd"))
- "fmac+ds*29")
-
-;; Moves to/from arm regs also use the load/store pipeline.
-(define_insn_reservation "vfp_fload" 4
- (and (eq_attr "generic_vfp" "yes")
- (eq_attr "type" "f_loads,f_loadd,r_2_f"))
- "vfp_ls")
-
-(define_insn_reservation "vfp_fstore" 4
- (and (eq_attr "generic_vfp" "yes")
- (eq_attr "type" "f_stores,f_stored,f_2_r"))
- "vfp_ls")
-
-(define_insn_reservation "vfp_to_cpsr" 4
- (and (eq_attr "generic_vfp" "yes")
- (eq_attr "type" "f_flag"))
- "fmstat,vfp_ls*3")
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;; Insn pattern
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
+;; APPLE LOCAL begin v7 support. Merge from mainline
;; SImode moves
;; ??? For now do not allow loading constants into vfp regs. This causes
;; problems because small constants get converted into adds.
(define_insn "*arm_movsi_vfp"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r ,m,*w,r,*w,*w, *Uv")
- (match_operand:SI 1 "general_operand" "rI,K,mi,r,r,*w,*w,*Uvi,*w"))]
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r ,m,*t,r,*t,*t, *Uv")
+ (match_operand:SI 1 "general_operand" "rI,K,N,mi,r,r,*t,*t,*Uvi,*t"))]
"TARGET_ARM && TARGET_VFP && TARGET_HARD_FLOAT
&& ( s_register_operand (operands[0], SImode)
|| s_register_operand (operands[1], SImode))"
- "@
- mov%?\\t%0, %1
- mvn%?\\t%0, #%B1
- ldr%?\\t%0, %1
- str%?\\t%1, %0
- fmsr%?\\t%0, %1\\t%@ int
- fmrs%?\\t%0, %1\\t%@ int
- fcpys%?\\t%0, %1\\t%@ int
- flds%?\\t%0, %1\\t%@ int
- fsts%?\\t%1, %0\\t%@ int"
- [(set_attr "predicable" "yes")
- (set_attr "type" "*,*,load1,store1,r_2_f,f_2_r,ffarith,f_loads,f_stores")
- (set_attr "pool_range" "*,*,4096,*,*,*,*,1020,*")
- (set_attr "neg_pool_range" "*,*,4084,*,*,*,*,1008,*")]
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ return \"mov%?\\t%0, %1\";
+ case 1:
+ return \"mvn%?\\t%0, #%B1\";
+ case 2:
+ return \"movw%?\\t%0, %1\";
+ case 3:
+ return \"ldr%?\\t%0, %1\";
+ case 4:
+ return \"str%?\\t%1, %0\";
+ case 5:
+ return \"fmsr%?\\t%0, %1\\t%@ int\";
+ case 6:
+ return \"fmrs%?\\t%0, %1\\t%@ int\";
+ case 7:
+ return \"fcpys%?\\t%0, %1\\t%@ int\";
+ case 8: case 9:
+ return output_move_vfp (operands);
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "*,*,*,load1,store1,r_2_f,f_2_r,ffarith,f_loads,f_stores")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov,mvn,mov,*,*,*,*,*,*,*")
+ (set_attr "pool_range" "*,*,*,4096,*,*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*,*,*,4084,*,*,*,*,1008,*")]
+)
+
+(define_insn "*thumb2_movsi_vfp"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,m,*t,r,*t,*t, *Uv")
+ (match_operand:SI 1 "general_operand" "rI,K,N,mi,r,r,*t,*t,*Uvi,*t"))]
+ "TARGET_THUMB2 && TARGET_VFP && TARGET_HARD_FLOAT
+ && ( s_register_operand (operands[0], SImode)
+ || s_register_operand (operands[1], SImode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ return \"mov%?\\t%0, %1\";
+ case 1:
+ return \"mvn%?\\t%0, #%B1\";
+ case 2:
+ return \"movw%?\\t%0, %1\";
+ case 3:
+ return \"ldr%?\\t%0, %1\";
+ case 4:
+ return \"str%?\\t%1, %0\";
+ case 5:
+ return \"fmsr%?\\t%0, %1\\t%@ int\";
+ case 6:
+ return \"fmrs%?\\t%0, %1\\t%@ int\";
+ case 7:
+ return \"fcpys%?\\t%0, %1\\t%@ int\";
+ case 8: case 9:
+ return output_move_vfp (operands);
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "type" "*,*,*,load1,store1,r_2_f,f_2_r,ffarith,f_load,f_store")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "mov,mvn,mov,*,*,*,*,*,*,*")
+ (set_attr "pool_range" "*,*,*,4096,*,*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*,*,*, 0,*,*,*,*,1008,*")]
)
@@ -165,10 +149,8 @@
return \"fmrrd%?\\t%Q0, %R0, %P1\\t%@ int\";
case 5:
return \"fcpyd%?\\t%P0, %P1\\t%@ int\";
- case 6:
- return \"fldd%?\\t%P0, %1\\t%@ int\";
- case 7:
- return \"fstd%?\\t%P1, %0\\t%@ int\";
+ case 6: case 7:
+ return output_move_vfp (operands);
default:
gcc_unreachable ();
}
@@ -179,38 +161,118 @@
(set_attr "neg_pool_range" "*,1008,*,*,*,*,1008,*")]
)
+(define_insn "*thumb2_movdi_vfp"
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r,m,w,r,w,w, Uv")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r,r,w,w,Uvi,w"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
+ "*
+ switch (which_alternative)
+ {
+ case 0: case 1: case 2:
+ return (output_move_double (operands));
+ case 3:
+ return \"fmdrr%?\\t%P0, %Q1, %R1\\t%@ int\";
+ case 4:
+ return \"fmrrd%?\\t%Q0, %R0, %P1\\t%@ int\";
+ case 5:
+ return \"fcpyd%?\\t%P0, %P1\\t%@ int\";
+ case 6: case 7:
+ return output_move_vfp (operands);
+ default:
+ abort ();
+ }
+ "
+ [(set_attr "type" "*,load2,store2,r_2_f,f_2_r,ffarith,f_load,f_store")
+ (set_attr "length" "8,8,8,4,4,4,4,4")
+ (set_attr "pool_range" "*,4096,*,*,*,*,1020,*")
+ (set_attr "neg_pool_range" "*, 0,*,*,*,*,1008,*")]
+)
+
;; SFmode moves
;; Disparage the w<->r cases because reloading an invalid address is
;; preferable to loading the value via integer registers.
(define_insn "*movsf_vfp"
- [(set (match_operand:SF 0 "nonimmediate_operand" "=w,?r,w ,Uv,r ,m,w,r")
- (match_operand:SF 1 "general_operand" " ?r,w,UvE,w, mE,r,w,r"))]
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=t,?r,t ,t ,Uv,r ,m,t,r")
+ (match_operand:SF 1 "general_operand" " ?r,t,Dv,UvE,t, mE,r,t,r"))]
"TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP
&& ( s_register_operand (operands[0], SFmode)
|| s_register_operand (operands[1], SFmode))"
- "@
- fmsr%?\\t%0, %1
- fmrs%?\\t%0, %1
- flds%?\\t%0, %1
- fsts%?\\t%1, %0
- ldr%?\\t%0, %1\\t%@ float
- str%?\\t%1, %0\\t%@ float
- fcpys%?\\t%0, %1
- mov%?\\t%0, %1\\t%@ float"
- [(set_attr "predicable" "yes")
- (set_attr "type" "r_2_f,f_2_r,ffarith,*,f_loads,f_stores,load1,store1")
- (set_attr "pool_range" "*,*,1020,*,4096,*,*,*")
- (set_attr "neg_pool_range" "*,*,1008,*,4080,*,*,*")]
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ return \"fmsr%?\\t%0, %1\";
+ case 1:
+ return \"fmrs%?\\t%0, %1\";
+ case 2:
+ return \"fconsts%?\\t%0, #%G1\";
+ case 3: case 4:
+ return output_move_vfp (operands);
+ case 5:
+ return \"ldr%?\\t%0, %1\\t%@ float\";
+ case 6:
+ return \"str%?\\t%1, %0\\t%@ float\";
+ case 7:
+ return \"fcpys%?\\t%0, %1\";
+ case 8:
+ return \"mov%?\\t%0, %1\\t%@ float\";
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "type"
+ "r_2_f,f_2_r,farith,f_loads,f_stores,load1,store1,ffarith,*")
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "insn" "*,*,*,*,*,*,*,*,mov")
+ (set_attr "pool_range" "*,*,*,1020,*,4096,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,1008,*,4080,*,*,*")]
+)
+
+(define_insn "*thumb2_movsf_vfp"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=t,?r,t, t ,Uv,r ,m,t,r")
+ (match_operand:SF 1 "general_operand" " ?r,t,Dv,UvE,t, mE,r,t,r"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP
+ && ( s_register_operand (operands[0], SFmode)
+ || s_register_operand (operands[1], SFmode))"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ return \"fmsr%?\\t%0, %1\";
+ case 1:
+ return \"fmrs%?\\t%0, %1\";
+ case 2:
+ return \"fconsts%?\\t%0, #%G1\";
+ case 3: case 4:
+ return output_move_vfp (operands);
+ case 5:
+ return \"ldr%?\\t%0, %1\\t%@ float\";
+ case 6:
+ return \"str%?\\t%1, %0\\t%@ float\";
+ case 7:
+ return \"fcpys%?\\t%0, %1\";
+ case 8:
+ return \"mov%?\\t%0, %1\\t%@ float\";
+ default:
+ gcc_unreachable ();
+ }
+ "
+ [(set_attr "predicable" "yes")
+ (set_attr "type"
+ "r_2_f,f_2_r,farith,f_load,f_store,load1,store1,ffarith,*")
+ (set_attr "pool_range" "*,*,*,1020,*,4092,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,1008,*,0,*,*,*")]
)
;; DFmode moves
(define_insn "*movdf_vfp"
- [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,r, m,w ,Uv,w,r")
- (match_operand:DF 1 "soft_df_operand" " ?r,w,mF,r,UvF,w, w,r"))]
+ [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,r, m,w ,Uv,w,r")
+ (match_operand:DF 1 "soft_df_operand" " ?r,w,Dv,mF,r,UvF,w, w,r"))]
"TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP
&& ( register_operand (operands[0], DFmode)
|| register_operand (operands[1], DFmode))"
@@ -222,37 +284,70 @@
return \"fmdrr%?\\t%P0, %Q1, %R1\";
case 1:
return \"fmrrd%?\\t%Q0, %R0, %P1\";
- case 2: case 3:
+ case 2:
+ return \"fconstd%?\\t%P0, #%G1\";
+ case 3: case 4:
return output_move_double (operands);
- case 4:
- return \"fldd%?\\t%P0, %1\";
- case 5:
- return \"fstd%?\\t%P1, %0\";
- case 6:
- return \"fcpyd%?\\t%P0, %P1\";
+ case 5: case 6:
+ return output_move_vfp (operands);
case 7:
+ return \"fcpyd%?\\t%P0, %P1\";
+ case 8:
return \"#\";
default:
gcc_unreachable ();
}
}
"
- [(set_attr "type" "r_2_f,f_2_r,ffarith,*,load2,store2,f_loadd,f_stored")
- (set_attr "length" "4,4,8,8,4,4,4,8")
- (set_attr "pool_range" "*,*,1020,*,1020,*,*,*")
- (set_attr "neg_pool_range" "*,*,1008,*,1008,*,*,*")]
+ [(set_attr "type"
+ "r_2_f,f_2_r,farith,f_loadd,f_stored,load2,store2,ffarith,*")
+ (set_attr "length" "4,4,4,8,8,4,4,4,8")
+ (set_attr "pool_range" "*,*,*,1020,*,1020,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,1008,*,1008,*,*,*")]
+)
+
+(define_insn "*thumb2_movdf_vfp"
+ [(set (match_operand:DF 0 "nonimmediate_soft_df_operand" "=w,?r,w ,r, m,w ,Uv,w,r")
+ (match_operand:DF 1 "soft_df_operand" " ?r,w,Dv,mF,r,UvF,w, w,r"))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return \"fmdrr%?\\t%P0, %Q1, %R1\";
+ case 1:
+ return \"fmrrd%?\\t%Q0, %R0, %P1\";
+ case 2:
+ return \"fconstd%?\\t%P0, #%G1\";
+ case 3: case 4: case 8:
+ return output_move_double (operands);
+ case 5: case 6:
+ return output_move_vfp (operands);
+ case 7:
+ return \"fcpyd%?\\t%P0, %P1\";
+ default:
+ abort ();
+ }
+ }
+ "
+ [(set_attr "type"
+ "r_2_f,f_2_r,farith,load2,store2,f_load,f_store,ffarith,*")
+ (set_attr "length" "4,4,4,8,8,4,4,4,8")
+ (set_attr "pool_range" "*,*,*,4096,*,1020,*,*,*")
+ (set_attr "neg_pool_range" "*,*,*,0,*,1008,*,*,*")]
)
;; Conditional move patterns
(define_insn "*movsfcc_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w,w,w,w,w,w,?r,?r,?r")
+ [(set (match_operand:SF 0 "s_register_operand" "=t,t,t,t,t,t,?r,?r,?r")
(if_then_else:SF
(match_operator 3 "arm_comparison_operator"
[(match_operand 4 "cc_register" "") (const_int 0)])
- (match_operand:SF 1 "s_register_operand" "0,w,w,0,?r,?r,0,w,w")
- (match_operand:SF 2 "s_register_operand" "w,0,w,?r,0,?r,w,0,w")))]
+ (match_operand:SF 1 "s_register_operand" "0,t,t,0,?r,?r,0,t,t")
+ (match_operand:SF 2 "s_register_operand" "t,0,t,?r,0,?r,t,0,t")))]
"TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
"@
fcpys%D3\\t%0, %2
@@ -269,6 +364,30 @@
(set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
)
+(define_insn "*thumb2_movsfcc_vfp"
+ [(set (match_operand:SF 0 "s_register_operand" "=t,t,t,t,t,t,?r,?r,?r")
+ (if_then_else:SF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,t,t,0,?r,?r,0,t,t")
+ (match_operand:SF 2 "s_register_operand" "t,0,t,?r,0,?r,t,0,t")))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ it\\t%D3\;fcpys%D3\\t%0, %2
+ it\\t%d3\;fcpys%d3\\t%0, %1
+ ite\\t%D3\;fcpys%D3\\t%0, %2\;fcpys%d3\\t%0, %1
+ it\\t%D3\;fmsr%D3\\t%0, %2
+ it\\t%d3\;fmsr%d3\\t%0, %1
+ ite\\t%D3\;fmsr%D3\\t%0, %2\;fmsr%d3\\t%0, %1
+ it\\t%D3\;fmrs%D3\\t%0, %2
+ it\\t%d3\;fmrs%d3\\t%0, %1
+ ite\\t%D3\;fmrs%D3\\t%0, %2\;fmrs%d3\\t%0, %1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,6,10,6,6,10,6,6,10")
+ (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
+)
+;; APPLE LOCAL end v7 support. Merge from mainline
+
(define_insn "*movdfcc_vfp"
[(set (match_operand:DF 0 "s_register_operand" "=w,w,w,w,w,w,?r,?r,?r")
(if_then_else:DF
@@ -292,13 +411,37 @@
(set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
)
+;; APPLE LOCAL begin v7 support. Merge from mainline
+(define_insn "*thumb2_movdfcc_vfp"
+ [(set (match_operand:DF 0 "s_register_operand" "=w,w,w,w,w,w,?r,?r,?r")
+ (if_then_else:DF
+ (match_operator 3 "arm_comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "s_register_operand" "0,w,w,0,?r,?r,0,w,w")
+ (match_operand:DF 2 "s_register_operand" "w,0,w,?r,0,?r,w,0,w")))]
+ "TARGET_THUMB2 && TARGET_HARD_FLOAT && TARGET_VFP"
+ "@
+ it\\t%D3\;fcpyd%D3\\t%P0, %P2
+ it\\t%d3\;fcpyd%d3\\t%P0, %P1
+ ite\\t%D3\;fcpyd%D3\\t%P0, %P2\;fcpyd%d3\\t%P0, %P1
+ it\t%D3\;fmdrr%D3\\t%P0, %Q2, %R2
+ it\t%d3\;fmdrr%d3\\t%P0, %Q1, %R1
+ ite\\t%D3\;fmdrr%D3\\t%P0, %Q2, %R2\;fmdrr%d3\\t%P0, %Q1, %R1
+ it\t%D3\;fmrrd%D3\\t%Q0, %R0, %P2
+ it\t%d3\;fmrrd%d3\\t%Q0, %R0, %P1
+ ite\\t%D3\;fmrrd%D3\\t%Q0, %R0, %P2\;fmrrd%d3\\t%Q0, %R0, %P1"
+ [(set_attr "conds" "use")
+ (set_attr "length" "6,6,10,6,6,10,6,6,10")
+ (set_attr "type" "ffarith,ffarith,ffarith,r_2_f,r_2_f,r_2_f,f_2_r,f_2_r,f_2_r")]
+)
+
;; Sign manipulation functions
(define_insn "*abssf2_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
- (abs:SF (match_operand:SF 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fabss%?\\t%0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "ffarith")]
@@ -307,16 +450,16 @@
(define_insn "*absdf2_vfp"
[(set (match_operand:DF 0 "s_register_operand" "=w")
(abs:DF (match_operand:DF 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fabsd%?\\t%P0, %P1"
[(set_attr "predicable" "yes")
(set_attr "type" "ffarith")]
)
(define_insn "*negsf2_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w,?r")
- (neg:SF (match_operand:SF 1 "s_register_operand" "w,r")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ [(set (match_operand:SF 0 "s_register_operand" "=t,?r")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "t,r")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"@
fnegs%?\\t%0, %1
eor%?\\t%0, %1, #-2147483648"
@@ -327,14 +470,15 @@
(define_insn_and_split "*negdf2_vfp"
[(set (match_operand:DF 0 "s_register_operand" "=w,?r,?r")
(neg:DF (match_operand:DF 1 "s_register_operand" "w,0,r")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"@
fnegd%?\\t%P0, %P1
#
#"
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP && reload_completed
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && reload_completed
&& arm_general_register_operand (operands[0], DFmode)"
[(set (match_dup 0) (match_dup 1))]
+;; APPLE LOCAL end v7 support. Merge from mainline
"
if (REGNO (operands[0]) == REGNO (operands[1]))
{
@@ -374,10 +518,13 @@
;; Arithmetic insns
(define_insn "*addsf3_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
- (plus:SF (match_operand:SF 1 "s_register_operand" "w")
- (match_operand:SF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fadds%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
(set_attr "type" "farith")]
@@ -387,7 +534,8 @@
[(set (match_operand:DF 0 "s_register_operand" "=w")
(plus:DF (match_operand:DF 1 "s_register_operand" "w")
(match_operand:DF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"faddd%?\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
(set_attr "type" "farith")]
@@ -395,10 +543,13 @@
(define_insn "*subsf3_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
- (minus:SF (match_operand:SF 1 "s_register_operand" "w")
- (match_operand:SF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (minus:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fsubs%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
(set_attr "type" "farith")]
@@ -408,7 +559,8 @@
[(set (match_operand:DF 0 "s_register_operand" "=w")
(minus:DF (match_operand:DF 1 "s_register_operand" "w")
(match_operand:DF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fsubd%?\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
(set_attr "type" "farith")]
@@ -418,10 +570,12 @@
;; Division insns
(define_insn "*divsf3_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "+w")
- (div:SF (match_operand:SF 1 "s_register_operand" "w")
- (match_operand:SF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "+t")
+ (div:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fdivs%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
(set_attr "type" "fdivs")]
@@ -431,7 +585,8 @@
[(set (match_operand:DF 0 "s_register_operand" "+w")
(div:DF (match_operand:DF 1 "s_register_operand" "w")
(match_operand:DF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fdivd%?\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
(set_attr "type" "fdivd")]
@@ -441,44 +596,55 @@
;; Multiplication insns
(define_insn "*mulsf3_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "+w")
- (mult:SF (match_operand:SF 1 "s_register_operand" "w")
- (match_operand:SF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "+t")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "t")
+ (match_operand:SF 2 "s_register_operand" "t")))]
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fmuls%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "type" "farith")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmuls")]
)
(define_insn "*muldf3_vfp"
[(set (match_operand:DF 0 "s_register_operand" "+w")
(mult:DF (match_operand:DF 1 "s_register_operand" "w")
(match_operand:DF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fmuld%?\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
- (set_attr "type" "fmul")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmuld")]
)
(define_insn "*mulsf3negsf_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "+w")
- (mult:SF (neg:SF (match_operand:SF 1 "s_register_operand" "w"))
- (match_operand:SF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "+t")
+ (mult:SF (neg:SF (match_operand:SF 1 "s_register_operand" "t"))
+ (match_operand:SF 2 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fnmuls%?\\t%0, %1, %2"
[(set_attr "predicable" "yes")
- (set_attr "type" "farith")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmuls")]
)
(define_insn "*muldf3negdf_vfp"
[(set (match_operand:DF 0 "s_register_operand" "+w")
(mult:DF (neg:DF (match_operand:DF 1 "s_register_operand" "w"))
(match_operand:DF 2 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fnmuld%?\\t%P0, %P1, %P2"
[(set_attr "predicable" "yes")
- (set_attr "type" "fmul")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmuld")]
)
@@ -486,14 +652,18 @@
;; 0 = 1 * 2 + 0
(define_insn "*mulsf3addsf_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
- (plus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "w")
- (match_operand:SF 3 "s_register_operand" "w"))
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (plus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "t")
+ (match_operand:SF 3 "s_register_operand" "t"))
(match_operand:SF 1 "s_register_operand" "0")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fmacs%?\\t%0, %2, %3"
[(set_attr "predicable" "yes")
- (set_attr "type" "farith")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacs")]
)
(define_insn "*muldf3adddf_vfp"
@@ -501,22 +671,28 @@
(plus:DF (mult:DF (match_operand:DF 2 "s_register_operand" "w")
(match_operand:DF 3 "s_register_operand" "w"))
(match_operand:DF 1 "s_register_operand" "0")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fmacd%?\\t%P0, %P2, %P3"
[(set_attr "predicable" "yes")
- (set_attr "type" "fmul")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacd")]
)
;; 0 = 1 * 2 - 0
(define_insn "*mulsf3subsf_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
- (minus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "w")
- (match_operand:SF 3 "s_register_operand" "w"))
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (minus:SF (mult:SF (match_operand:SF 2 "s_register_operand" "t")
+ (match_operand:SF 3 "s_register_operand" "t"))
(match_operand:SF 1 "s_register_operand" "0")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL 6150859 use NEON instructions for SF math
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP && !TARGET_NEON"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fmscs%?\\t%0, %2, %3"
[(set_attr "predicable" "yes")
- (set_attr "type" "farith")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacs")]
)
(define_insn "*muldf3subdf_vfp"
@@ -524,22 +700,27 @@
(minus:DF (mult:DF (match_operand:DF 2 "s_register_operand" "w")
(match_operand:DF 3 "s_register_operand" "w"))
(match_operand:DF 1 "s_register_operand" "0")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fmscd%?\\t%P0, %P2, %P3"
[(set_attr "predicable" "yes")
- (set_attr "type" "fmul")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacd")]
)
;; 0 = -(1 * 2) + 0
(define_insn "*mulsf3negsfaddsf_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
(minus:SF (match_operand:SF 1 "s_register_operand" "0")
- (mult:SF (match_operand:SF 2 "s_register_operand" "w")
- (match_operand:SF 3 "s_register_operand" "w"))))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ (mult:SF (match_operand:SF 2 "s_register_operand" "t")
+ (match_operand:SF 3 "s_register_operand" "t"))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fnmacs%?\\t%0, %2, %3"
[(set_attr "predicable" "yes")
- (set_attr "type" "farith")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacs")]
)
(define_insn "*fmuldf3negdfadddf_vfp"
@@ -547,24 +728,28 @@
(minus:DF (match_operand:DF 1 "s_register_operand" "0")
(mult:DF (match_operand:DF 2 "s_register_operand" "w")
(match_operand:DF 3 "s_register_operand" "w"))))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fnmacd%?\\t%P0, %P2, %P3"
[(set_attr "predicable" "yes")
- (set_attr "type" "fmul")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacd")]
)
;; 0 = -(1 * 2) - 0
(define_insn "*mulsf3negsfsubsf_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
(minus:SF (mult:SF
- (neg:SF (match_operand:SF 2 "s_register_operand" "w"))
- (match_operand:SF 3 "s_register_operand" "w"))
+ (neg:SF (match_operand:SF 2 "s_register_operand" "t"))
+ (match_operand:SF 3 "s_register_operand" "t"))
(match_operand:SF 1 "s_register_operand" "0")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fnmscs%?\\t%0, %2, %3"
[(set_attr "predicable" "yes")
- (set_attr "type" "farith")]
+ (set_attr "type" "fmacs")]
+;; APPLE LOCAL end v7 support. Merge from Codesourcery
)
(define_insn "*muldf3negdfsubdf_vfp"
@@ -573,46 +758,56 @@
(neg:DF (match_operand:DF 2 "s_register_operand" "w"))
(match_operand:DF 3 "s_register_operand" "w"))
(match_operand:DF 1 "s_register_operand" "0")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fnmscd%?\\t%P0, %P2, %P3"
[(set_attr "predicable" "yes")
- (set_attr "type" "fmul")]
+;; APPLE LOCAL v7 support. Merge from Codesourcery
+ (set_attr "type" "fmacd")]
)
;; Conversion routines
(define_insn "*extendsfdf2_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
[(set (match_operand:DF 0 "s_register_operand" "=w")
- (float_extend:DF (match_operand:SF 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fcvtds%?\\t%P0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
)
(define_insn "*truncdfsf2_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
(float_truncate:SF (match_operand:DF 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fcvtsd%?\\t%0, %P1"
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
)
(define_insn "*truncsisf2_vfp"
- [(set (match_operand:SI 0 "s_register_operand" "=w")
- (fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" "w"))))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SI 0 "s_register_operand" "=t")
+ (fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" "t"))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"ftosizs%?\\t%0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
)
(define_insn "*truncsidf2_vfp"
- [(set (match_operand:SI 0 "s_register_operand" "=w")
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SI 0 "s_register_operand" "=t")
(fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" "w"))))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"ftosizd%?\\t%0, %P1"
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
@@ -620,37 +815,45 @@
(define_insn "fixuns_truncsfsi2"
- [(set (match_operand:SI 0 "s_register_operand" "=w")
- (unsigned_fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" "w"))))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SI 0 "s_register_operand" "=t")
+ (unsigned_fix:SI (fix:SF (match_operand:SF 1 "s_register_operand" "t"))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"ftouizs%?\\t%0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
)
(define_insn "fixuns_truncdfsi2"
- [(set (match_operand:SI 0 "s_register_operand" "=w")
- (unsigned_fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" "w"))))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SI 0 "s_register_operand" "=t")
+ (unsigned_fix:SI (fix:DF (match_operand:DF 1 "s_register_operand" "t"))))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"ftouizd%?\\t%0, %P1"
+;; APPLE LOCAL end v7 support. Merge from mainline
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
)
(define_insn "*floatsisf2_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
- (float:SF (match_operand:SI 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (float:SF (match_operand:SI 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fsitos%?\\t%0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
)
(define_insn "*floatsidf2_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
[(set (match_operand:DF 0 "s_register_operand" "=w")
- (float:DF (match_operand:SI 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ (float:DF (match_operand:SI 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fsitod%?\\t%P0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
@@ -658,18 +861,22 @@
(define_insn "floatunssisf2"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
- (unsigned_float:SF (match_operand:SI 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (unsigned_float:SF (match_operand:SI 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fuitos%?\\t%0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
)
(define_insn "floatunssidf2"
+;; APPLE LOCAL begin v7 support. Merge from mainline
[(set (match_operand:DF 0 "s_register_operand" "=w")
- (unsigned_float:DF (match_operand:SI 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ (unsigned_float:DF (match_operand:SI 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fuitod%?\\t%P0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "f_cvt")]
@@ -679,9 +886,11 @@
;; Sqrt insns.
(define_insn "*sqrtsf2_vfp"
- [(set (match_operand:SF 0 "s_register_operand" "=w")
- (sqrt:SF (match_operand:SF 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "t")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"fsqrts%?\\t%0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "fdivs")]
@@ -690,7 +899,8 @@
(define_insn "*sqrtdf2_vfp"
[(set (match_operand:DF 0 "s_register_operand" "=w")
(sqrt:DF (match_operand:DF 1 "s_register_operand" "w")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fsqrtd%?\\t%P0, %P1"
[(set_attr "predicable" "yes")
(set_attr "type" "fdivd")]
@@ -702,19 +912,22 @@
(define_insn "*movcc_vfp"
[(set (reg CC_REGNUM)
(reg VFPCC_REGNUM))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"fmstat%?"
[(set_attr "conds" "set")
(set_attr "type" "f_flag")]
)
(define_insn_and_split "*cmpsf_split_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
[(set (reg:CCFP CC_REGNUM)
- (compare:CCFP (match_operand:SF 0 "s_register_operand" "w")
- (match_operand:SF 1 "vfp_compare_operand" "wG")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "t")
+ (match_operand:SF 1 "vfp_compare_operand" "tG")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"#"
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
[(set (reg:CCFP VFPCC_REGNUM)
(compare:CCFP (match_dup 0)
(match_dup 1)))
@@ -724,12 +937,14 @@
)
(define_insn_and_split "*cmpsf_trap_split_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
[(set (reg:CCFPE CC_REGNUM)
- (compare:CCFPE (match_operand:SF 0 "s_register_operand" "w")
- (match_operand:SF 1 "vfp_compare_operand" "wG")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "t")
+ (match_operand:SF 1 "vfp_compare_operand" "tG")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"#"
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
[(set (reg:CCFPE VFPCC_REGNUM)
(compare:CCFPE (match_dup 0)
(match_dup 1)))
@@ -742,9 +957,11 @@
[(set (reg:CCFP CC_REGNUM)
(compare:CCFP (match_operand:DF 0 "s_register_operand" "w")
(match_operand:DF 1 "vfp_compare_operand" "wG")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"#"
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
[(set (reg:CCFP VFPCC_REGNUM)
(compare:CCFP (match_dup 0)
(match_dup 1)))
@@ -757,9 +974,11 @@
[(set (reg:CCFPE CC_REGNUM)
(compare:CCFPE (match_operand:DF 0 "s_register_operand" "w")
(match_operand:DF 1 "vfp_compare_operand" "wG")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"#"
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
[(set (reg:CCFPE VFPCC_REGNUM)
(compare:CCFPE (match_dup 0)
(match_dup 1)))
@@ -772,10 +991,12 @@
;; Comparison patterns
(define_insn "*cmpsf_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
[(set (reg:CCFP VFPCC_REGNUM)
- (compare:CCFP (match_operand:SF 0 "s_register_operand" "w,w")
- (match_operand:SF 1 "vfp_compare_operand" "w,G")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "t,t")
+ (match_operand:SF 1 "vfp_compare_operand" "t,G")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"@
fcmps%?\\t%0, %1
fcmpzs%?\\t%0"
@@ -784,10 +1005,12 @@
)
(define_insn "*cmpsf_trap_vfp"
+;; APPLE LOCAL begin v7 support. Merge from mainline
[(set (reg:CCFPE VFPCC_REGNUM)
- (compare:CCFPE (match_operand:SF 0 "s_register_operand" "w,w")
- (match_operand:SF 1 "vfp_compare_operand" "w,G")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "t,t")
+ (match_operand:SF 1 "vfp_compare_operand" "t,G")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL end v7 support. Merge from mainline
"@
fcmpes%?\\t%0, %1
fcmpezs%?\\t%0"
@@ -799,7 +1022,8 @@
[(set (reg:CCFP VFPCC_REGNUM)
(compare:CCFP (match_operand:DF 0 "s_register_operand" "w,w")
(match_operand:DF 1 "vfp_compare_operand" "w,G")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"@
fcmpd%?\\t%P0, %P1
fcmpzd%?\\t%P0"
@@ -811,7 +1035,8 @@
[(set (reg:CCFPE VFPCC_REGNUM)
(compare:CCFPE (match_operand:DF 0 "s_register_operand" "w,w")
(match_operand:DF 1 "vfp_compare_operand" "w,G")))]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
+;; APPLE LOCAL v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
"@
fcmped%?\\t%P0, %P1
fcmpezd%?\\t%P0"
@@ -827,8 +1052,10 @@
[(set (match_operand:BLK 0 "memory_operand" "=m")
(unspec:BLK [(match_operand:DF 1 "s_register_operand" "w")]
UNSPEC_PUSH_MULT))])]
- "TARGET_ARM && TARGET_HARD_FLOAT && TARGET_VFP"
- "* return vfp_output_fstmx (operands);"
+;; APPLE LOCAL begin v7 support. Merge from mainline
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
+ "* return vfp_output_fstmd (operands);"
+;; APPLE LOCAL end v7 support. Merge from mainline
[(set_attr "type" "f_stored")]
)
Added: llvm-gcc-4.2/trunk/gcc/config/arm/vfp11.md
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/vfp11.md?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/vfp11.md (added)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/vfp11.md Wed Jul 22 15:36:27 2009
@@ -0,0 +1,94 @@
+;; APPLE LOCAL file v7 support. Merge from Codesourcery
+;; ARM VFP11 pipeline description
+;; Copyright (C) 2003, 2005, 2007 Free Software Foundation, Inc.
+;; Written by CodeSourcery, LLC.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+;; 02110-1301, USA. */
+
+(define_automaton "vfp11")
+
+;; There are 3 pipelines in the VFP11 unit.
+;;
+;; - An 8-stage FMAC pipeline (7 execute + writeback) with forward from
+;; fourth stage for simple operations.
+;;
+;; - A 5-stage DS pipeline (4 execute + writeback) for divide/sqrt insns.
+;; These insns also use the first execute stage of the FMAC pipeline.
+;;
+;; - A 4-stage LS pipeline (execute + 2 memory + writeback) with forward from
+;; second memory stage for loads.
+
+;; We do not model Write-After-Read hazards.
+;; We do not do write scheduling with the arm core, so it is only necessary
+;; to model the first stage of each pipeline
+;; ??? Need to model LS pipeline properly for load/store multiple?
+;; We do not model fmstat properly. This could be done by modeling pipelines
+;; properly and defining an absence set between a dummy fmstat unit and all
+;; other vfp units.
+
+(define_cpu_unit "fmac" "vfp11")
+
+(define_cpu_unit "ds" "vfp11")
+
+(define_cpu_unit "vfp_ls" "vfp11")
+
+(define_cpu_unit "fmstat" "vfp11")
+
+(exclusion_set "fmac,ds" "fmstat")
+
+(define_insn_reservation "vfp_ffarith" 4
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "ffarith"))
+ "fmac")
+
+(define_insn_reservation "vfp_farith" 8
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "farith,f_cvt,fmuls,fmacs"))
+ "fmac")
+
+(define_insn_reservation "vfp_fmul" 9
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "fmuld,fmacd"))
+ "fmac*2")
+
+(define_insn_reservation "vfp_fdivs" 19
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "fdivs"))
+ "ds*15")
+
+(define_insn_reservation "vfp_fdivd" 33
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "fdivd"))
+ "fmac+ds*29")
+
+;; Moves to/from arm regs also use the load/store pipeline.
+(define_insn_reservation "vfp_fload" 4
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "f_loads,f_loadd,r_2_f"))
+ "vfp_ls")
+
+(define_insn_reservation "vfp_fstore" 4
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "f_stores,f_stored,f_2_r"))
+ "vfp_ls")
+
+(define_insn_reservation "vfp_to_cpsr" 4
+ (and (eq_attr "generic_vfp" "yes")
+ (eq_attr "type" "f_flag"))
+ "fmstat,vfp_ls*3")
+
Modified: llvm-gcc-4.2/trunk/gcc/config/darwin-c.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/darwin-c.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/darwin-c.c (original)
+++ llvm-gcc-4.2/trunk/gcc/config/darwin-c.c Wed Jul 22 15:36:27 2009
@@ -1161,14 +1161,14 @@
argument = TREE_CHAIN (argument);
}
- /* APPLE LOCAL begin 7020016 */
+ /* LLVM LOCAL begin 7020016 */
if (argument == NULL_TREE)
{
error ("argument number of CFString format too large");
*no_add_attrs = true;
return false;
}
- /* APPLE LOCAL end 7020016 */
+ /* LLVM LOCAL end 7020016 */
if (!objc_check_cfstringref_type (TREE_VALUE (argument)))
{
error ("format CFString argument not an 'CFStringRef' type");
Modified: llvm-gcc-4.2/trunk/gcc/config/darwin-driver.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/darwin-driver.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/darwin-driver.c (original)
+++ llvm-gcc-4.2/trunk/gcc/config/darwin-driver.c Wed Jul 22 15:36:27 2009
@@ -143,13 +143,13 @@
}
/* For iPhone OS, if no version number is specified, we default to
- 2.0. */
+ 3.0. */
if (vers_type == DARWIN_VERSION_IPHONEOS)
{
++*argc_p;
*argv_p = xmalloc (sizeof (char *) * *argc_p);
(*argv_p)[0] = argv[0];
- (*argv_p)[1] = xstrdup ("-miphoneos-version-min=2.0");
+ (*argv_p)[1] = xstrdup ("-miphoneos-version-min=3.0");
memcpy (*argv_p + 2, argv + 1, (argc - 1) * sizeof (char *));
return;
}
Modified: llvm-gcc-4.2/trunk/gcc/config/darwin.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/darwin.h?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/darwin.h (original)
+++ llvm-gcc-4.2/trunk/gcc/config/darwin.h Wed Jul 22 15:36:27 2009
@@ -433,9 +433,9 @@
%{mmacosx-version-min=*:-macosx_version_min %*} \
%{miphoneos-version-min=*:-iphoneos_version_min %*} \
"/* APPLE LOCAL end ARM 5683689 */"\
- "/* APPLE LOCAL begin llvm */\
+ "/* LLVM LOCAL begin */\
LLVM_LINK_SPEC \
- /* APPLE LOCAL end llvm */" \
+ /* LLVM LOCAL end */" \
%{nomultidefs} \
%{Zmulti_module:-multi_module} %{Zsingle_module:-single_module} \
%{Zmultiply_defined*:-multiply_defined %*} \
@@ -550,23 +550,30 @@
{ "darwin_iphoneos_libgcc", DARWIN_IPHONEOS_LIBGCC_SPEC },
/* APPLE LOCAL begin ARM 5683689 */
+/* APPLE LOCAL begin link optimizations 6999417 */
#define DARWIN_DYLIB1_SPEC \
- "%{miphoneos-version-min=*: -ldylib1.o} \
+ "%{miphoneos-version-min=*: \
+ %:version-compare(< 3.1 miphoneos-version-min= -ldylib1.o)} \
%{!miphoneos-version-min=*: \
%:version-compare(!> 10.5 mmacosx-version-min= -ldylib1.o) \
%:version-compare(>= 10.5 mmacosx-version-min= -ldylib1.10.5.o)}"
/* APPLE LOCAL begin link optimizations 6499452 */
#define DARWIN_BUNDLE1_SPEC \
- "-lbundle1.o"
+ "%{miphoneos-version-min=*: \
+ %:version-compare(< 3.1 miphoneos-version-min= -lbundle1.o)} \
+ %{!miphoneos-version-min=*: -lbundle1.o }"
/* APPLE LOCAL end link optimizations 6499452 */
#define DARWIN_CRT1_SPEC \
/* APPLE LOCAL ARM 5823776 iphoneos should use crt1.o */ \
- "%{miphoneos-version-min=*: -lcrt1.o} \
+ "%{miphoneos-version-min=*: \
+ %:version-compare(< 3.1 miphoneos-version-min= -lcrt1.o) \
+ %:version-compare(>= 3.1 miphoneos-version-min= -lcrt1.3.1.o)} \
%{!miphoneos-version-min=*: \
%:version-compare(!> 10.5 mmacosx-version-min= -lcrt1.o) \
%:version-compare(>= 10.5 mmacosx-version-min= -lcrt1.10.5.o)}"
+/* APPLE LOCAL end link optimizations 6999417 */
/* APPLE LOCAL end ARM 5683689 */
/* APPLE LOCAL begin prefer -lSystem 6645902 */
Modified: llvm-gcc-4.2/trunk/gcc/config/rs6000/rs6000.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/rs6000/rs6000.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/rs6000/rs6000.c (original)
+++ llvm-gcc-4.2/trunk/gcc/config/rs6000/rs6000.c Wed Jul 22 15:36:27 2009
@@ -6758,7 +6758,7 @@
{
/* Ensure that we don't find any more args in regs.
Alignment has taken care of the n_reg == 2 gpr case. */
- t = build2 (MODIFY_EXPR, TREE_TYPE (reg), reg, build_int_cstu(unsigned_char_type_node, 8));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (reg), reg, size_int (8));
gimplify_and_add (t, pre_p);
}
}
Modified: llvm-gcc-4.2/trunk/gcc/configure.ac
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/configure.ac?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/configure.ac (original)
+++ llvm-gcc-4.2/trunk/gcc/configure.ac Wed Jul 22 15:36:27 2009
@@ -963,9 +963,11 @@
# See if makeinfo has been installed and is modern enough
# that we can use it.
+# LLVM LOCAL begin Recognize more recent versions of makeinfo.
gcc_AC_CHECK_PROG_VER(MAKEINFO, makeinfo, --version,
[GNU texinfo.* \([0-9][0-9.]*\)],
[4.[4-9]* | 4.1[0-9]*])
+# LLVM LOCAL end Recognize more recent versions of makeinfo.
if test $gcc_cv_prog_makeinfo_modern = no; then
MAKEINFO="$MISSING makeinfo"
AC_MSG_WARN([
Modified: llvm-gcc-4.2/trunk/gcc/cp/ChangeLog.apple
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/cp/ChangeLog.apple?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/cp/ChangeLog.apple (original)
+++ llvm-gcc-4.2/trunk/gcc/cp/ChangeLog.apple Wed Jul 22 15:36:27 2009
@@ -1,3 +1,9 @@
+2009-06-15 Fariborz Jahanian <fjahanian at apple.com>
+
+ Radar 6936421
+ * cvt.c (force_rvalue): Convert property reference
+ expression to its getter call before converting to rvalue.
+
2009-02-11 Fariborz Jahanian <fjahanian at apple.com>
Radar 6573923
Modified: llvm-gcc-4.2/trunk/gcc/cp/cvt.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/cp/cvt.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/cp/cvt.c (original)
+++ llvm-gcc-4.2/trunk/gcc/cp/cvt.c Wed Jul 22 15:36:27 2009
@@ -578,9 +578,14 @@
tree
force_rvalue (tree expr)
{
- if (IS_AGGR_TYPE (TREE_TYPE (expr)) && TREE_CODE (expr) != TARGET_EXPR)
+ /* APPLE LOCAL begin radar 6936421 */
+ if (IS_AGGR_TYPE (TREE_TYPE (expr)) && TREE_CODE (expr) != TARGET_EXPR) {
+ if (objc_property_reference_expr (expr))
+ expr = objc_build_property_getter_func_call (expr);
expr = ocp_convert (TREE_TYPE (expr), expr,
CONV_IMPLICIT|CONV_FORCE_TEMP, LOOKUP_NORMAL);
+ }
+ /* APPLE LOCAL end radar 6936421 */
else
expr = decay_conversion (expr);
Modified: llvm-gcc-4.2/trunk/gcc/cp/decl2.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/cp/decl2.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/cp/decl2.c (original)
+++ llvm-gcc-4.2/trunk/gcc/cp/decl2.c Wed Jul 22 15:36:27 2009
@@ -1754,7 +1754,10 @@
tree underlying_type = TREE_TYPE (DECL_NAME (decl));
int underlying_vis = type_visibility (underlying_type);
if (underlying_vis == VISIBILITY_ANON
- || CLASSTYPE_VISIBILITY_SPECIFIED (underlying_type))
+ /* APPLE LOCAL begin 6983171 */
+ || (TREE_CODE (underlying_type) == RECORD_TYPE
+ && CLASSTYPE_VISIBILITY_SPECIFIED (underlying_type)))
+ /* APPLE LOCAL end 6983171 */
constrain_visibility (decl, underlying_vis);
else
DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
Modified: llvm-gcc-4.2/trunk/gcc/cp/name-lookup.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/cp/name-lookup.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/cp/name-lookup.c (original)
+++ llvm-gcc-4.2/trunk/gcc/cp/name-lookup.c Wed Jul 22 15:36:27 2009
@@ -42,6 +42,7 @@
#define EMPTY_SCOPE_BINDING { NULL_TREE, NULL_TREE }
static cxx_scope *innermost_nonclass_level (void);
+
static cxx_binding *binding_for_name (cxx_scope *, tree);
static tree lookup_name_innermost_nonclass_level (tree);
static tree push_overloaded_decl (tree, int, bool);
@@ -3747,13 +3748,18 @@
tree siter;
struct cp_binding_level *level;
tree val = NULL_TREE;
+#ifndef ENABLE_LLVM
+ struct scope_binding binding = EMPTY_SCOPE_BINDING;
+#endif
timevar_push (TV_NAME_LOOKUP);
for (; !val; scope = CP_DECL_CONTEXT (scope))
{
/* LLVM LOCAL begin mainline */
+#ifdef ENABLE_LLVM
struct scope_binding binding = EMPTY_SCOPE_BINDING;
+#endif
/* LLVM LOCAL end mainline */
cxx_binding *b =
cxx_scope_find_binding_for_name (NAMESPACE_LEVEL (scope), name);
Added: llvm-gcc-4.2/trunk/gcc/doc/arm-neon-intrinsics.texi
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/doc/arm-neon-intrinsics.texi?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/doc/arm-neon-intrinsics.texi (added)
+++ llvm-gcc-4.2/trunk/gcc/doc/arm-neon-intrinsics.texi Wed Jul 22 15:36:27 2009
@@ -0,0 +1,11294 @@
+@c APPLE LOCAL file ARM NEON Intrinsics. Merge from Codesourcery. */
+@c Copyright (C) 2006 Free Software Foundation, Inc.
+@c This is part of the GCC manual.
+@c For copying conditions, see the file gcc.texi.
+
+@c This file is generated automatically using gcc/config/arm/neon-docgen.ml
+@c Please do not edit manually.
+@subsubsection Addition
+
+@itemize @bullet
+@item uint32x2_t vadd_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i32 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
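
As a usage note for the intrinsics documented in this section, a minimal C
sketch (assuming <arm_neon.h> and compilation with -mfpu=neon; function and
variable names are illustrative):

  #include <arm_neon.h>

  /* Adds two 64-bit vectors of two unsigned 32-bit lanes each; expected to
     compile to a single vadd.i32 on d registers.  */
  uint32x2_t add_pairs (uint32x2_t a, uint32x2_t b)
  {
    return vadd_u32 (a, b);
  }

  /* The 128-bit q-register variant, expected to map to vadd.i32 on
     q registers.  */
  uint32x4_t add_quads (uint32x4_t a, uint32x4_t b)
  {
    return vaddq_u32 (a, b);
  }
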
+@itemize @bullet
+@item uint16x4_t vadd_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i16 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint8x8_t vadd_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i8 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int32x2_t vadd_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i32 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int16x4_t vadd_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i16 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int8x8_t vadd_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i8 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint64x1_t vadd_u64 (uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i64 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int64x1_t vadd_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i64 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item float32x2_t vadd_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.f32 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint32x4_t vaddq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i32 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint16x8_t vaddq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i16 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint8x16_t vaddq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i8 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vaddq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i32 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vaddq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i16 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vaddq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i8 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vaddq_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i64 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vaddq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.i64 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item float32x4_t vaddq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vadd.f32 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vaddl_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vaddl.u32 @var{q0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint32x4_t vaddl_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaddl.u16 @var{q0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint16x8_t vaddl_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaddl.u8 @var{q0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vaddl_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vaddl.s32 @var{q0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vaddl_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaddl.s16 @var{q0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vaddl_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaddl.s8 @var{q0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint64x2_t vaddw_u32 (uint64x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vaddw.u32 @var{q0}, @var{q0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint32x4_t vaddw_u16 (uint32x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaddw.u16 @var{q0}, @var{q0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint16x8_t vaddw_u8 (uint16x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaddw.u8 @var{q0}, @var{q0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int64x2_t vaddw_s32 (int64x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vaddw.s32 @var{q0}, @var{q0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vaddw_s16 (int32x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaddw.s16 @var{q0}, @var{q0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vaddw_s8 (int16x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaddw.s8 @var{q0}, @var{q0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint32x2_t vhadd_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.u32 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint16x4_t vhadd_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.u16 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint8x8_t vhadd_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.u8 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int32x2_t vhadd_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.s32 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int16x4_t vhadd_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.s16 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int8x8_t vhadd_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.s8 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint32x4_t vhaddq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.u32 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint16x8_t vhaddq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.u16 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint8x16_t vhaddq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.u8 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item int32x4_t vhaddq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.s32 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item int16x8_t vhaddq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.s16 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item int8x16_t vhaddq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vhadd.s8 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint32x2_t vrhadd_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.u32 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint16x4_t vrhadd_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.u16 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint8x8_t vrhadd_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.u8 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int32x2_t vrhadd_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.s32 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int16x4_t vrhadd_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.s16 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item int8x8_t vrhadd_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.s8 @var{d0}, @var{d0}, @var{d0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint32x4_t vrhaddq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.u32 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint16x8_t vrhaddq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.u16 @var{q0}, @var{q0}, @var{q0}}
+@end itemize
+
+
+@itemize @bullet
+@item uint8x16_t vrhaddq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vrhaddq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vrhaddq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vrhaddq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrhadd.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqadd_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqadd_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqadd_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqadd_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqadd_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqadd_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vqadd_u64 (uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.u64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vqadd_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.s64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vqaddq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vqaddq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vqaddq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqaddq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqaddq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vqaddq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vqaddq_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.u64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqaddq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqadd.s64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vaddhn_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vaddhn.i64 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vaddhn_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaddhn.i32 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vaddhn_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaddhn.i16 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vaddhn_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vaddhn.i64 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vaddhn_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaddhn.i32 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vaddhn_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaddhn.i16 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vraddhn_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vraddhn.i64 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vraddhn_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vraddhn.i32 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vraddhn_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vraddhn.i16 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vraddhn_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vraddhn.i64 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vraddhn_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vraddhn.i32 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vraddhn_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vraddhn.i16 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
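+
+The widening, halving, saturating and narrowing variants above can be
+combined to keep intermediate sums in range.  A minimal sketch, assuming
+@code{arm_neon.h} and a NEON-enabled target (the helper names are purely
+illustrative):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Rounding average of eight 8-bit samples: (a + b + 1) >> 1, computed
+   without overflowing 8 bits (vrhadd.u8).  */
+uint8x8_t
+average_u8 (uint8x8_t a, uint8x8_t b)
+{
+  return vrhadd_u8 (a, b);
+}
+
+/* Sum 16-bit lanes into 32-bit lanes (vaddl.u16 q0, d0, d0).  */
+uint32x4_t
+widening_sum (uint16x4_t a, uint16x4_t b)
+{
+  return vaddl_u16 (a, b);
+}
+@end smallexample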
+
+
+
+
+ at subsubsection Multiplication
+
+ at itemize @bullet
+ at item uint32x2_t vmul_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmul_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vmul_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmul_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmul_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vmul_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vmul_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vmul_p8 (poly8x8_t, poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.p8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmulq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmulq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vmulq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmulq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmulq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vmulq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmulq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vmulq_p8 (poly8x16_t, poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.p8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqdmulh_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqdmulh_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmulhq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqdmulhq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqrdmulh_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqrdmulh_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqrdmulhq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqrdmulhq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vmull_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.u32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmull_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.u16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmull_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.u8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmull_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.s32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmull_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.s16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmull_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.s8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vmull_p8 (poly8x8_t, poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.p8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqdmull_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmull.s32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmull_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmull.s16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
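+
+The widening and saturating multiplies are typically used as in the
+following sketch (assumes @code{arm_neon.h}; the function names are only
+illustrative):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Full-precision product of 16-bit lanes: vmull.s16 widens to 32 bits.  */
+int32x4_t
+widening_mul (int16x4_t a, int16x4_t b)
+{
+  return vmull_s16 (a, b);
+}
+
+/* Fixed-point (Q15) multiply: vqdmulh.s16 yields the doubled, saturated
+   high half of the product.  */
+int16x8_t
+q15_mul (int16x8_t a, int16x8_t b)
+{
+  return vqdmulhq_s16 (a, b);
+}
+@end smallexample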
+
+
+
+
+ at subsubsection Multiply-accumulate
+
+ at itemize @bullet
+ at item uint32x2_t vmla_u32 (uint32x2_t, uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmla_u16 (uint16x4_t, uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vmla_u8 (uint8x8_t, uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmla_s32 (int32x2_t, int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmla_s16 (int16x4_t, int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vmla_s8 (int8x8_t, int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vmla_f32 (float32x2_t, float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlaq_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmlaq_u16 (uint16x8_t, uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vmlaq_u8 (uint8x16_t, uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlaq_s32 (int32x4_t, int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmlaq_s16 (int16x8_t, int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vmlaq_s8 (int8x16_t, int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmlaq_f32 (float32x4_t, float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vmlal_u32 (uint64x2_t, uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.u32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlal_u16 (uint32x4_t, uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.u16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmlal_u8 (uint16x8_t, uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.u8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmlal_s32 (int64x2_t, int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.s32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlal_s16 (int32x4_t, int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.s16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmlal_s8 (int16x8_t, int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.s8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqdmlal_s32 (int64x2_t, int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmlal.s32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmlal_s16 (int32x4_t, int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmlal.s16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
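+
+A typical use is a dot-product step that accumulates 16-bit products into
+32-bit lanes; a brief sketch (assumes @code{arm_neon.h}; the name
+@code{dot_step} is illustrative):
+
+@smallexample
+#include <arm_neon.h>
+
+/* acc += a * b, widening each 16-bit product to 32 bits (vmlal.s16).  */
+int32x4_t
+dot_step (int32x4_t acc, int16x4_t a, int16x4_t b)
+{
+  return vmlal_s16 (acc, a, b);
+}
+@end smallexample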
+
+
+
+
+ at subsubsection Multiply-subtract
+
+ at itemize @bullet
+ at item uint32x2_t vmls_u32 (uint32x2_t, uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmls_u16 (uint16x4_t, uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vmls_u8 (uint8x8_t, uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmls_s32 (int32x2_t, int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmls_s16 (int16x4_t, int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vmls_s8 (int8x8_t, int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vmls_f32 (float32x2_t, float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlsq_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmlsq_u16 (uint16x8_t, uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vmlsq_u8 (uint8x16_t, uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlsq_s32 (int32x4_t, int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmlsq_s16 (int16x8_t, int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vmlsq_s8 (int8x16_t, int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmlsq_f32 (float32x4_t, float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vmlsl_u32 (uint64x2_t, uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.u32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlsl_u16 (uint32x4_t, uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.u16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmlsl_u8 (uint16x8_t, uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.u8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmlsl_s32 (int64x2_t, int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.s32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlsl_s16 (int32x4_t, int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.s16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmlsl_s8 (int16x8_t, int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.s8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqdmlsl_s32 (int64x2_t, int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmlsl.s32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmlsl_s16 (int32x4_t, int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmlsl.s16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
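+
+The multiply-subtract forms mirror the multiply-accumulate ones; for
+instance (an illustrative sketch, assuming @code{arm_neon.h}):
+
+@smallexample
+#include <arm_neon.h>
+
+/* acc -= a * b on single-precision lanes (vmls.f32).  */
+float32x4_t
+mls_step (float32x4_t acc, float32x4_t a, float32x4_t b)
+{
+  return vmlsq_f32 (acc, a, b);
+}
+@end smallexample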
+
+
+
+
+ at subsubsection Subtraction
+
+ at itemize @bullet
+ at item uint32x2_t vsub_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vsub_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vsub_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vsub_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vsub_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vsub_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vsub_u64 (uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vsub_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vsub_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vsubq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vsubq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vsubq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vsubq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vsubq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vsubq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vsubq_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vsubq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.i64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vsubq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsub.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vsubl_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsubl.u32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vsubl_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsubl.u16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vsubl_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsubl.u8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vsubl_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsubl.s32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vsubl_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsubl.s16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vsubl_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsubl.s8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vsubw_u32 (uint64x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsubw.u32 @var{q0}, @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vsubw_u16 (uint32x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsubw.u16 @var{q0}, @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vsubw_u8 (uint16x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsubw.u8 @var{q0}, @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vsubw_s32 (int64x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsubw.s32 @var{q0}, @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vsubw_s16 (int32x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsubw.s16 @var{q0}, @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vsubw_s8 (int16x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsubw.s8 @var{q0}, @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vhsub_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vhsub_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vhsub_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vhsub_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vhsub_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vhsub_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vhsubq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vhsubq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vhsubq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vhsubq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vhsubq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vhsubq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vhsub.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqsub_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqsub_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqsub_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqsub_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqsub_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqsub_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vqsub_u64 (uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.u64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vqsub_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.s64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vqsubq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vqsubq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vqsubq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqsubq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqsubq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vqsubq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vqsubq_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.u64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqsubq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqsub.s64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vsubhn_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsubhn.i64 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vsubhn_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsubhn.i32 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vsubhn_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsubhn.i16 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vsubhn_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vsubhn.i64 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vsubhn_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vsubhn.i32 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vsubhn_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vsubhn.i16 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vrsubhn_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrsubhn.i64 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vrsubhn_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrsubhn.i32 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vrsubhn_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrsubhn.i16 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vrsubhn_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrsubhn.i64 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vrsubhn_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrsubhn.i32 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vrsubhn_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrsubhn.i16 @var{d0}, @var{q0}, @var{q0}}
+ at end itemize
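+
+The saturating and narrowing subtractions behave like their addition
+counterparts; a brief sketch (assumes @code{arm_neon.h}; names are
+illustrative):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Clamp-at-zero difference of unsigned bytes: vqsub.u8 saturates to 0
+   instead of wrapping around.  */
+uint8x16_t
+clamped_diff (uint8x16_t a, uint8x16_t b)
+{
+  return vqsubq_u8 (a, b);
+}
+
+/* Narrow a 32-bit difference back to 16 bits, keeping the high halves
+   (vsubhn.i32 d0, q0, q0).  */
+int16x4_t
+narrow_diff (int32x4_t a, int32x4_t b)
+{
+  return vsubhn_s32 (a, b);
+}
+@end smallexample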
+
+
+
+
+ at subsubsection Comparison (equal-to)
+
+ at itemize @bullet
+ at item uint32x2_t vceq_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vceq_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vceq_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vceq_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vceq_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vceq_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vceq_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vceq_p8 (poly8x8_t, poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vceqq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vceqq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vceqq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vceqq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vceqq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vceqq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vceqq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vceqq_p8 (poly8x16_t, poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vceq.i8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
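+
+Each comparison sets every bit of a result lane when the condition holds
+and clears it otherwise, so the result is normally used as a select mask.
+A minimal sketch (assumes @code{arm_neon.h}; @code{vbslq_f32} and
+@code{vdupq_n_f32} are the bitwise-select and vector-duplicate intrinsics
+from the same header, not shown in this excerpt):
+
+@smallexample
+#include <arm_neon.h>
+
+/* result[i] = (a[i] == b[i]) ? a[i] : 0.0f
+   vceq.f32 produces an all-ones/all-zeros mask per lane, which the
+   bitwise select then applies.  */
+float32x4_t
+keep_equal (float32x4_t a, float32x4_t b)
+{
+  uint32x4_t mask = vceqq_f32 (a, b);
+  return vbslq_f32 (mask, a, vdupq_n_f32 (0.0f));
+}
+@end smallexample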
+
+
+
+
+ at subsubsection Comparison (greater-than-or-equal-to)
+
+ at itemize @bullet
+ at item uint32x2_t vcge_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vcge_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vcge_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vcge_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vcge_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vcge_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vcge_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcgeq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vcgeq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcgeq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcgeq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vcgeq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcgeq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcgeq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
+ at subsubsection Comparison (less-than-or-equal-to)
+
+ at itemize @bullet
+ at item uint32x2_t vcle_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vcle_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vcle_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vcle_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vcle_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vcle_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vcle_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcleq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vcleq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcleq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcleq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vcleq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcleq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcleq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcge.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
+ at subsubsection Comparison (greater-than)
+
+ at itemize @bullet
+ at item uint32x2_t vcgt_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vcgt_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vcgt_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vcgt_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vcgt_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vcgt_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vcgt_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcgtq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vcgtq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcgtq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcgtq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vcgtq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcgtq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcgtq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
+ at subsubsection Comparison (less-than)
+
+ at itemize @bullet
+ at item uint32x2_t vclt_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vclt_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vclt_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vclt_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vclt_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vclt_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vclt_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcltq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vcltq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcltq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcltq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vcltq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcltq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcltq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcgt.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
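+
+As the listed instruction forms show, the less-than and
+less-than-or-equal intrinsics are expected to expand to @code{vcgt} and
+@code{vcge} with the source operands swapped; for example (a sketch,
+assuming @code{arm_neon.h}):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Per-lane mask of a < b; expected to emit vcgt.s32 with the operands
+   reversed, i.e. the same result as vcgt_s32 (b, a).  */
+uint32x2_t
+less_than (int32x2_t a, int32x2_t b)
+{
+  return vclt_s32 (a, b);
+}
+@end smallexample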
+
+
+
+
+ at subsubsection Comparison (absolute greater-than-or-equal-to)
+
+ at itemize @bullet
+ at item uint32x2_t vcage_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vacge.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcageq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vacge.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
+ at subsubsection Comparison (absolute less-than-or-equal-to)
+
+ at itemize @bullet
+ at item uint32x2_t vcale_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vacge.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcaleq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vacge.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
+ at subsubsection Comparison (absolute greater-than)
+
+ at itemize @bullet
+ at item uint32x2_t vcagt_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vacgt.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcagtq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vacgt.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
+ at subsubsection Comparison (absolute less-than)
+
+ at itemize @bullet
+ at item uint32x2_t vcalt_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vacgt.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcaltq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vacgt.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
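+
+The absolute comparisons operate on the magnitudes of their operands,
+which is convenient for tolerance tests; a sketch (assumes
+@code{arm_neon.h}):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Per-lane mask of |err| >= |tol| (vacge.f32).  */
+uint32x4_t
+magnitude_ge (float32x4_t err, float32x4_t tol)
+{
+  return vcageq_f32 (err, tol);
+}
+@end smallexample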
+
+
+
+
+ at subsubsection Test bits
+
+ at itemize @bullet
+ at item uint32x2_t vtst_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vtst_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtst_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vtst_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vtst_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtst_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtst_p8 (poly8x8_t, poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vtstq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vtstq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vtstq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vtstq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vtstq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vtstq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vtstq_p8 (poly8x16_t, poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vtst.8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
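+
+These intrinsics produce a per-lane mask of @code{(a & b) != 0}, which is
+useful for testing flag bits in packed data; a sketch (assumes
+@code{arm_neon.h}; @code{vdupq_n_u8} is the vector-duplicate intrinsic
+from the same header, not shown in this excerpt):
+
+@smallexample
+#include <arm_neon.h>
+
+/* mask[i] is all-ones if any bit of `flag' is set in a[i].  */
+uint8x16_t
+has_flag (uint8x16_t a, uint8_t flag)
+{
+  return vtstq_u8 (a, vdupq_n_u8 (flag));
+}
+@end smallexample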
+
+
+
+
+ at subsubsection Absolute difference
+
+ at itemize @bullet
+ at item uint32x2_t vabd_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vabd_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vabd_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vabd_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vabd_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vabd_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vabd_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vabdq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vabdq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vabdq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vabdq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vabdq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vabdq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vabdq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabd.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vabdl_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vabdl.u32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vabdl_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabdl.u16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vabdl_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabdl.u8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vabdl_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vabdl.s32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vabdl_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabdl.s16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vabdl_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabdl.s8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+
+
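+For illustration only (the function name is invented for this sketch),
+the widening form of the absolute-difference intrinsics might be used
+as follows:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Per-lane |a - b|, widened from 8-bit to 16-bit lanes so the
+   subtraction cannot overflow.  */
+uint16x8_t
+abs_diff_widen (uint8x8_t a, uint8x8_t b)
+@{
+  return vabdl_u8 (a, b);
+@}
+@end smallexample
+
+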
+ at subsubsection Absolute difference and accumulate
+
+ at itemize @bullet
+ at item uint32x2_t vaba_u32 (uint32x2_t, uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vaba_u16 (uint16x4_t, uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vaba_u8 (uint8x8_t, uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vaba_s32 (int32x2_t, int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vaba_s16 (int16x4_t, int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vaba_s8 (int8x8_t, int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vabaq_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vabaq_u16 (uint16x8_t, uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vabaq_u8 (uint8x16_t, uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vabaq_s32 (int32x4_t, int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vabaq_s16 (int16x8_t, int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vabaq_s8 (int8x16_t, int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vaba.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vabal_u32 (uint64x2_t, uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vabal.u32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vabal_u16 (uint32x4_t, uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabal.u16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vabal_u8 (uint16x8_t, uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabal.u8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vabal_s32 (int64x2_t, int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vabal.s32 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vabal_s16 (int32x4_t, int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabal.s16 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vabal_s8 (int16x8_t, int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabal.s8 @var{q0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+
+
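+A minimal, hypothetical sketch of how the accumulating form is often
+applied, for example as one step of a sum-of-absolute-differences
+loop:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Add the per-lane absolute differences of two 8-byte blocks into
+   16-bit partial sums held in ACC.  */
+uint16x8_t
+sad_step (uint16x8_t acc, uint8x8_t a, uint8x8_t b)
+@{
+  return vabal_u8 (acc, a, b);
+@}
+@end smallexample
+
+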
+ at subsubsection Maximum
+
+ at itemize @bullet
+ at item uint32x2_t vmax_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmax_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vmax_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmax_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmax_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vmax_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vmax_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmaxq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmaxq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vmaxq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmaxq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmaxq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vmaxq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmaxq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmax.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
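+A trivial illustrative use (function name hypothetical):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Per-lane maximum of two four-float vectors.  */
+float32x4_t
+lane_max (float32x4_t a, float32x4_t b)
+@{
+  return vmaxq_f32 (a, b);
+@}
+@end smallexample
+
+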
+ at subsubsection Minimum
+
+ at itemize @bullet
+ at item uint32x2_t vmin_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmin_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vmin_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmin_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmin_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vmin_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vmin_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vminq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vminq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vminq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vminq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vminq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vminq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vminq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmin.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
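+Combined with the maximum intrinsics above, the minimum intrinsics
+give a per-lane clamp; a small sketch (names hypothetical):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Clamp each lane of X to the range [LO, HI].  */
+float32x4_t
+clamp_lanes (float32x4_t x, float32x4_t lo, float32x4_t hi)
+@{
+  return vminq_f32 (vmaxq_f32 (x, lo), hi);
+@}
+@end smallexample
+
+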
+ at subsubsection Pairwise add
+
+ at itemize @bullet
+ at item uint32x2_t vpadd_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpadd.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vpadd_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpadd.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vpadd_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpadd.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vpadd_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpadd.i32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vpadd_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpadd.i16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vpadd_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpadd.i8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vpadd_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpadd.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vpaddl_u32 (uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.u32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vpaddl_u16 (uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.u16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vpaddl_u8 (uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.u8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vpaddl_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.s32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vpaddl_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.s16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vpaddl_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.s8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vpaddlq_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.u32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vpaddlq_u16 (uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.u16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vpaddlq_u8 (uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.u8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vpaddlq_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.s32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vpaddlq_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.s16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vpaddlq_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vpaddl.s8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
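+An illustrative sketch of the long (widening) pairwise add, with a
+hypothetical function name:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Sum adjacent pairs of 8-bit lanes into 16-bit lanes, halving the
+   lane count and widening each result.  */
+uint16x4_t
+pairwise_widen (uint8x8_t a)
+@{
+  return vpaddl_u8 (a);
+@}
+@end smallexample
+
+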
+ at subsubsection Pairwise add, widen and accumulate
+
+ at itemize @bullet
+ at item uint64x1_t vpadal_u32 (uint64x1_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.u32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vpadal_u16 (uint32x2_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.u16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vpadal_u8 (uint16x4_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.u8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vpadal_s32 (int64x1_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.s32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vpadal_s16 (int32x2_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.s16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vpadal_s8 (int16x4_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.s8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vpadalq_u32 (uint64x2_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.u32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vpadalq_u16 (uint32x4_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.u16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vpadalq_u8 (uint16x8_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.u8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vpadalq_s32 (int64x2_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.s32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vpadalq_s16 (int32x4_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.s16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vpadalq_s8 (int16x8_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vpadal.s8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
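+A hedged sketch of the accumulating variant (function name invented):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Pairwise-add the 8-bit lanes of A, widen the sums to 16 bits and
+   add them into the running accumulator ACC.  */
+uint16x4_t
+accumulate_pairs (uint16x4_t acc, uint8x8_t a)
+@{
+  return vpadal_u8 (acc, a);
+@}
+@end smallexample
+
+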
+ at subsubsection Folding maximum
+
+ at itemize @bullet
+ at item uint32x2_t vpmax_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpmax.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vpmax_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpmax.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vpmax_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpmax.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vpmax_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpmax.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vpmax_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpmax.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vpmax_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpmax.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vpmax_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpmax.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+
+
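+Each lane of a folding-maximum result is the maximum of an adjacent
+pair of lanes drawn from the operands, so passing the same vector
+twice yields its largest lane in every result lane; an illustrative
+sketch (function name hypothetical):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Both lanes of the result hold max (a[0], a[1]).  */
+float32x2_t
+horizontal_max (float32x2_t a)
+@{
+  return vpmax_f32 (a, a);
+@}
+@end smallexample
+
+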
+ at subsubsection Folding minimum
+
+ at itemize @bullet
+ at item uint32x2_t vpmin_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpmin.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vpmin_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpmin.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vpmin_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpmin.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vpmin_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpmin.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vpmin_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vpmin.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vpmin_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vpmin.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vpmin_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vpmin.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+
+
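+The folding minimum behaves analogously; for illustration:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Both lanes of the result hold min (a[0], a[1]).  */
+float32x2_t
+horizontal_min (float32x2_t a)
+@{
+  return vpmin_f32 (a, a);
+@}
+@end smallexample
+
+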
+ at subsubsection Reciprocal step
+
+ at itemize @bullet
+ at item float32x2_t vrecps_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrecps.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vrecpsq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrecps.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vrsqrts_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrsqrts.f32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vrsqrtsq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrsqrts.f32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
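+The reciprocal and reciprocal-square-root step intrinsics are normally
+combined with an initial estimate in a Newton-Raphson iteration.  A
+hedged sketch of one reciprocal refinement step follows; it assumes
+@code{vmul_f32} from elsewhere in @code{<arm_neon.h>} (not listed
+above) and that @code{est} already holds an approximation of
+1/@code{b}:
+
+@smallexample
+#include <arm_neon.h>
+
+/* One Newton-Raphson step: est' = est * (2 - b * est).
+   vrecps_f32 computes the (2 - b * est) factor; vmul_f32 (assumed
+   from <arm_neon.h>, not part of the listing above) applies it.  */
+float32x2_t
+refine_recip (float32x2_t b, float32x2_t est)
+@{
+  return vmul_f32 (est, vrecps_f32 (b, est));
+@}
+@end smallexample
+
+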
+ at subsubsection Vector shift left
+
+ at itemize @bullet
+ at item uint32x2_t vshl_u32 (uint32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vshl_u16 (uint16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vshl_u8 (uint8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vshl_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vshl_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vshl_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vshl_u64 (uint64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.u64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vshl_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.s64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vshlq_u32 (uint32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vshlq_u16 (uint16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vshlq_u8 (uint8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vshlq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vshlq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vshlq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vshlq_u64 (uint64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.u64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vshlq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vshl.s64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vrshl_u32 (uint32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vrshl_u16 (uint16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vrshl_u8 (uint8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vrshl_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vrshl_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vrshl_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vrshl_u64 (uint64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.u64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vrshl_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.s64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vrshlq_u32 (uint32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vrshlq_u16 (uint16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vrshlq_u8 (uint8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vrshlq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vrshlq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vrshlq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vrshlq_u64 (uint64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.u64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vrshlq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrshl.s64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqshl_u32 (uint32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqshl_u16 (uint16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqshl_u8 (uint8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqshl_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqshl_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqshl_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vqshl_u64 (uint64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vqshl_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vqshlq_u32 (uint32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vqshlq_u16 (uint16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vqshlq_u8 (uint8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqshlq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqshlq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vqshlq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vqshlq_u64 (uint64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqshlq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqrshl_u32 (uint32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.u32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqrshl_u16 (uint16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.u16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqrshl_u8 (uint8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.u8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqrshl_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.s32 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqrshl_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.s16 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqrshl_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.s8 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vqrshl_u64 (uint64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.u64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vqrshl_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.s64 @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vqrshlq_u32 (uint32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.u32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vqrshlq_u16 (uint16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.u16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vqrshlq_u8 (uint8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.u8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqrshlq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.s32 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqrshlq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.s16 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vqrshlq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.s8 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vqrshlq_u64 (uint64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.u64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqrshlq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqrshl.s64 @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
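+For these register-shift forms the per-lane shift count is signed, and
+a negative count shifts the corresponding lane right; a minimal sketch
+(function name hypothetical):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Shift each lane of A left by the matching (signed) count in N;
+   lanes with a negative count are shifted right instead.  */
+int32x4_t
+shift_by_lanes (int32x4_t a, int32x4_t n)
+@{
+  return vshlq_s32 (a, n);
+@}
+@end smallexample
+
+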
+ at subsubsection Vector shift left by constant
+
+ at itemize @bullet
+ at item uint32x2_t vshl_n_u32 (uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vshl_n_u16 (uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vshl_n_u8 (uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vshl_n_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vshl_n_s16 (int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vshl_n_s8 (int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vshl_n_u64 (uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vshl_n_s64 (int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vshlq_n_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vshlq_n_u16 (uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vshlq_n_u8 (uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vshlq_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vshlq_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vshlq_n_s8 (int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vshlq_n_u64 (uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vshlq_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshl.i64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqshl_n_u32 (uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqshl_n_u16 (uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqshl_n_u8 (uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqshl_n_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqshl_n_s16 (int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqshl_n_s8 (int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vqshl_n_u64 (uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vqshl_n_s64 (int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vqshlq_n_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vqshlq_n_u16 (uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vqshlq_n_u8 (uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqshlq_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqshlq_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vqshlq_n_s8 (int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vqshlq_n_u64 (uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.u64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqshlq_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshl.s64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vqshlu_n_s64 (int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshlu.s64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqshlu_n_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshlu.s32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqshlu_n_s16 (int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshlu.s16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqshlu_n_s8 (int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshlu.s8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vqshluq_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshlu.s64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vqshluq_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshlu.s32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vqshluq_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshlu.s16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vqshluq_n_s8 (int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshlu.s8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vshll_n_u32 (uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshll.u32 @var{q0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vshll_n_u16 (uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshll.u16 @var{q0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vshll_n_u8 (uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshll.u8 @var{q0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vshll_n_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshll.s32 @var{q0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vshll_n_s16 (int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshll.s16 @var{q0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vshll_n_s8 (int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshll.s8 @var{q0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+
+
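+An illustrative use of the widening shift-left-by-constant form (the
+function name is invented; the count must be a compile-time constant):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Widen 8-bit lanes to 16 bits while shifting each left by 4.  */
+uint16x8_t
+widen_shift (uint8x8_t a)
+@{
+  return vshll_n_u8 (a, 4);
+@}
+@end smallexample
+
+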
+ at subsubsection Vector shift right by constant
+
+ at itemize @bullet
+ at item uint32x2_t vshr_n_u32 (uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.u32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vshr_n_u16 (uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.u16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vshr_n_u8 (uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.u8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vshr_n_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.s32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vshr_n_s16 (int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.s16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vshr_n_s8 (int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.s8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vshr_n_u64 (uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.u64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vshr_n_s64 (int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.s64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vshrq_n_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.u32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vshrq_n_u16 (uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.u16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vshrq_n_u8 (uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.u8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vshrq_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.s32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vshrq_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.s16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vshrq_n_s8 (int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.s8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vshrq_n_u64 (uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.u64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vshrq_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshr.s64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vrshr_n_u32 (uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.u32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vrshr_n_u16 (uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.u16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vrshr_n_u8 (uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.u8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vrshr_n_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.s32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vrshr_n_s16 (int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.s16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vrshr_n_s8 (int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.s8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vrshr_n_u64 (uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.u64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vrshr_n_s64 (int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.s64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vrshrq_n_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.u32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vrshrq_n_u16 (uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.u16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vrshrq_n_u8 (uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.u8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vrshrq_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.s32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vrshrq_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.s16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vrshrq_n_s8 (int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.s8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vrshrq_n_u64 (uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.u64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vrshrq_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshr.s64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vshrn_n_u64 (uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshrn.i64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vshrn_n_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshrn.i32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vshrn_n_u16 (uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshrn.i16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vshrn_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshrn.i64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vshrn_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshrn.i32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vshrn_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vshrn.i16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vrshrn_n_u64 (uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshrn.i64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vrshrn_n_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshrn.i32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vrshrn_n_u16 (uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshrn.i16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vrshrn_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshrn.i64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vrshrn_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshrn.i32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vrshrn_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrshrn.i16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqshrn_n_u64 (uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshrn.u64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqshrn_n_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshrn.u32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqshrn_n_u16 (uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshrn.u16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqshrn_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshrn.s64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqshrn_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshrn.s32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqshrn_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshrn.s16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqrshrn_n_u64 (uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrshrn.u64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqrshrn_n_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrshrn.u32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqrshrn_n_u16 (uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrshrn.u16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqrshrn_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrshrn.s64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqrshrn_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrshrn.s32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqrshrn_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrshrn.s16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqshrun_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshrun.s64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqshrun_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshrun.s32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqshrun_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqshrun.s16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqrshrun_n_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrshrun.s64 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqrshrun_n_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrshrun.s32 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqrshrun_n_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrshrun.s16 @var{d0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+
+
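+A hedged sketch of the saturating, rounding, narrowing form, which is
+a common final step when converting fixed-point intermediates back to
+bytes (function name hypothetical):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Shift 16-bit signed lanes right by 8 with rounding, saturate to
+   the unsigned 8-bit range and narrow.  */
+uint8x8_t
+narrow_to_bytes (int16x8_t a)
+@{
+  return vqrshrun_n_s16 (a, 8);
+@}
+@end smallexample
+
+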
+ at subsubsection Vector shift right by constant and accumulate
+
+ at itemize @bullet
+ at item uint32x2_t vsra_n_u32 (uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.u32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vsra_n_u16 (uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.u16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vsra_n_u8 (uint8x8_t, uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.u8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vsra_n_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.s32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vsra_n_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.s16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vsra_n_s8 (int8x8_t, int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.s8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vsra_n_u64 (uint64x1_t, uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.u64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vsra_n_s64 (int64x1_t, int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.s64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vsraq_n_u32 (uint32x4_t, uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.u32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vsraq_n_u16 (uint16x8_t, uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.u16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vsraq_n_u8 (uint8x16_t, uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.u8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vsraq_n_s32 (int32x4_t, int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.s32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vsraq_n_s16 (int16x8_t, int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.s16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vsraq_n_s8 (int8x16_t, int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.s8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vsraq_n_u64 (uint64x2_t, uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.u64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vsraq_n_s64 (int64x2_t, int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsra.s64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vrsra_n_u32 (uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.u32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vrsra_n_u16 (uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.u16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vrsra_n_u8 (uint8x8_t, uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.u8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vrsra_n_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.s32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vrsra_n_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.s16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vrsra_n_s8 (int8x8_t, int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.s8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vrsra_n_u64 (uint64x1_t, uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.u64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vrsra_n_s64 (int64x1_t, int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.s64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vrsraq_n_u32 (uint32x4_t, uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.u32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vrsraq_n_u16 (uint16x8_t, uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.u16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vrsraq_n_u8 (uint8x16_t, uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.u8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vrsraq_n_s32 (int32x4_t, int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.s32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vrsraq_n_s16 (int16x8_t, int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.s16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vrsraq_n_s8 (int8x16_t, int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.s8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vrsraq_n_u64 (uint64x2_t, uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.u64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vrsraq_n_s64 (int64x2_t, int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vrsra.s64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
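+A minimal usage sketch for the accumulating shifts above (it assumes
+@code{arm_neon.h} and a NEON-enabled target; the wrapper name is
+illustrative only):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Add (val >> 2) into each unsigned 32-bit lane of acc; the
+   vrsraq_n_u32 form would round the shifted value first.  */
+uint32x4_t
+accumulate_shifted (uint32x4_t acc, uint32x4_t val)
+@{
+  return vsraq_n_u32 (acc, val, 2);
+@}
+@end smallexample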
+
+
+ at subsubsection Vector shift right and insert
+
+ at itemize @bullet
+ at item uint32x2_t vsri_n_u32 (uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vsri_n_u16 (uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vsri_n_u8 (uint8x8_t, uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vsri_n_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vsri_n_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vsri_n_s8 (int8x8_t, int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vsri_n_u64 (uint64x1_t, uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vsri_n_s64 (int64x1_t, int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vsri_n_p16 (poly16x4_t, poly16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vsri_n_p8 (poly8x8_t, poly8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vsriq_n_u32 (uint32x4_t, uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vsriq_n_u16 (uint16x8_t, uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vsriq_n_u8 (uint8x16_t, uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vsriq_n_s32 (int32x4_t, int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vsriq_n_s16 (int16x8_t, int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vsriq_n_s8 (int8x16_t, int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vsriq_n_u64 (uint64x2_t, uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vsriq_n_s64 (int64x2_t, int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vsriq_n_p16 (poly16x8_t, poly16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vsriq_n_p8 (poly8x16_t, poly8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsri.8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
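+A sketch of one of the insert-shifts above, under the same assumptions
+as the earlier example:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Shift each byte of src right by 4 and insert the result into dst,
+   leaving the top 4 bits of every dst lane unchanged.  */
+uint8x16_t
+merge_low_nibbles (uint8x16_t dst, uint8x16_t src)
+@{
+  return vsriq_n_u8 (dst, src, 4);
+@}
+@end smallexample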
+
+
+ at subsubsection Vector shift left and insert
+
+ at itemize @bullet
+ at item uint32x2_t vsli_n_u32 (uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vsli_n_u16 (uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vsli_n_u8 (uint8x8_t, uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vsli_n_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vsli_n_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vsli_n_s8 (int8x8_t, int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vsli_n_u64 (uint64x1_t, uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vsli_n_s64 (int64x1_t, int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.64 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vsli_n_p16 (poly16x4_t, poly16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.16 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vsli_n_p8 (poly8x8_t, poly8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.8 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vsliq_n_u32 (uint32x4_t, uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vsliq_n_u16 (uint16x8_t, uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vsliq_n_u8 (uint8x16_t, uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vsliq_n_s32 (int32x4_t, int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vsliq_n_s16 (int16x8_t, int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vsliq_n_s8 (int8x16_t, int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vsliq_n_u64 (uint64x2_t, uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vsliq_n_s64 (int64x2_t, int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.64 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vsliq_n_p16 (poly16x8_t, poly16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.16 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vsliq_n_p8 (poly8x16_t, poly8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vsli.8 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
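+The left-shifting counterpart, sketched the same way (helper name made
+up for illustration):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Shift each 16-bit lane of src left by 4 and insert it into dst,
+   leaving the bottom 4 bits of every dst lane unchanged.  */
+uint16x8_t
+merge_high_bits (uint16x8_t dst, uint16x8_t src)
+@{
+  return vsliq_n_u16 (dst, src, 4);
+@}
+@end smallexample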
+
+
+ at subsubsection Absolute value
+
+ at itemize @bullet
+ at item float32x2_t vabs_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vabs.f32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vabs_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vabs.s32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vabs_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabs.s16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vabs_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabs.s8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vabsq_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabs.f32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vabsq_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vabs.s32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vabsq_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vabs.s16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vabsq_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vabs.s8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqabs_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqabs.s32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqabs_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqabs.s16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqabs_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqabs.s8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqabsq_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqabs.s32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqabsq_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqabs.s16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vqabsq_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqabs.s8 @var{q0}, @var{q0}}
+ at end itemize
+
+
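+An illustrative sketch of the plain and saturating forms:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Lane-wise absolute value of four floats.  */
+float32x4_t
+lane_abs (float32x4_t v)
+@{
+  return vabsq_f32 (v);
+@}
+
+/* Saturating form: INT32_MIN maps to INT32_MAX instead of wrapping.  */
+int32x4_t
+lane_abs_sat (int32x4_t v)
+@{
+  return vqabsq_s32 (v);
+@}
+@end smallexample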
+
+
+ at subsubsection Negation
+
+ at itemize @bullet
+ at item float32x2_t vneg_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vneg.f32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vneg_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vneg.s32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vneg_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vneg.s16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vneg_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vneg.s8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vnegq_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vneg.f32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vnegq_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vneg.s32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vnegq_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vneg.s16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vnegq_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vneg.s8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqneg_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqneg.s32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqneg_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqneg.s16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqneg_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqneg.s8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqnegq_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqneg.s32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqnegq_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqneg.s16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vqnegq_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vqneg.s8 @var{q0}, @var{q0}}
+ at end itemize
+
+
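+A brief sketch of the saturating negation (same assumptions as above):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Lane-wise negation with saturation: INT16_MIN becomes INT16_MAX
+   rather than wrapping back to INT16_MIN.  */
+int16x8_t
+lane_negate_sat (int16x8_t v)
+@{
+  return vqnegq_s16 (v);
+@}
+@end smallexample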
+
+
+ at subsubsection Bitwise not
+
+ at itemize @bullet
+ at item uint32x2_t vmvn_u32 (uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmvn_u16 (uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vmvn_u8 (uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmvn_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmvn_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vmvn_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vmvn_p8 (poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmvnq_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmvnq_u16 (uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vmvnq_u8 (uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmvnq_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmvnq_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vmvnq_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vmvnq_p8 (poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmvn @var{q0}, @var{q0}}
+ at end itemize
+
+
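+For illustration:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Complement every bit of the four 32-bit lanes.  */
+uint32x4_t
+lane_not (uint32x4_t v)
+@{
+  return vmvnq_u32 (v);
+@}
+@end smallexample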
+
+
+ at subsubsection Count leading sign bits
+
+ at itemize @bullet
+ at item int32x2_t vcls_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcls.s32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vcls_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcls.s16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vcls_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcls.s8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vclsq_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcls.s32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vclsq_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcls.s16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vclsq_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcls.s8 @var{q0}, @var{q0}}
+ at end itemize
+
+
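+A sketch of the quadword form (the comment states the usual reading of
+the operation):
+
+@smallexample
+#include <arm_neon.h>
+
+/* For each 32-bit lane, count how many bits below the sign bit are
+   copies of it, i.e. the number of redundant sign bits.  */
+int32x4_t
+lane_cls (int32x4_t v)
+@{
+  return vclsq_s32 (v);
+@}
+@end smallexample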
+
+
+ at subsubsection Count leading zeros
+
+ at itemize @bullet
+ at item uint32x2_t vclz_u32 (uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vclz_u16 (uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vclz_u8 (uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vclz_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vclz_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vclz_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vclzq_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vclzq_u16 (uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vclzq_u8 (uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vclzq_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vclzq_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vclzq_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vclz.i8 @var{q0}, @var{q0}}
+ at end itemize
+
+
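+For example:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Count the leading zero bits in each unsigned 32-bit lane.  */
+uint32x4_t
+lane_clz (uint32x4_t v)
+@{
+  return vclzq_u32 (v);
+@}
+@end smallexample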
+
+
+ at subsubsection Count number of set bits
+
+ at itemize @bullet
+ at item uint8x8_t vcnt_u8 (uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcnt.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vcnt_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcnt.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vcnt_p8 (poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vcnt.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcntq_u8 (uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcnt.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vcntq_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcnt.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vcntq_p8 (poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vcnt.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
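+A sketch of the byte-wise population count (wider per-lane popcounts
+are usually built by pairwise-adding these byte counts):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Number of set bits in each of the eight bytes.  */
+uint8x8_t
+byte_popcount (uint8x8_t v)
+@{
+  return vcnt_u8 (v);
+@}
+@end smallexample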
+
+
+ at subsubsection Reciprocal estimate
+
+ at itemize @bullet
+ at item float32x2_t vrecpe_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrecpe.f32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vrecpe_u32 (uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrecpe.u32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vrecpeq_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrecpe.f32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vrecpeq_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrecpe.u32 @var{q0}, @var{q0}}
+ at end itemize
+
+
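+A sketch of the usual refinement idiom; besides @code{vrecpeq_f32} it
+assumes @code{vrecpsq_f32} and @code{vmulq_f32} from the same header,
+which are documented elsewhere:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Approximate 1.0/x: start from the hardware estimate and apply one
+   Newton-Raphson step via the reciprocal-step instruction.  */
+float32x4_t
+approx_recip (float32x4_t x)
+@{
+  float32x4_t r = vrecpeq_f32 (x);
+  return vmulq_f32 (vrecpsq_f32 (x, r), r);
+@}
+@end smallexample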
+
+
+ at subsubsection Reciprocal square-root estimate
+
+ at itemize @bullet
+ at item float32x2_t vrsqrte_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrsqrte.f32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vrsqrte_u32 (uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrsqrte.u32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vrsqrteq_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrsqrte.f32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vrsqrteq_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrsqrte.u32 @var{q0}, @var{q0}}
+ at end itemize
+
+
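+The analogous refinement sketch, assuming @code{vrsqrtsq_f32} and
+@code{vmulq_f32} in addition to the estimate intrinsic:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Approximate 1.0/sqrt(x) with one refinement step using the
+   reciprocal-square-root-step instruction.  */
+float32x4_t
+approx_rsqrt (float32x4_t x)
+@{
+  float32x4_t r = vrsqrteq_f32 (x);
+  return vmulq_f32 (vrsqrtsq_f32 (vmulq_f32 (x, r), r), r);
+@}
+@end smallexample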
+
+
+ at subsubsection Get lanes from a vector
+
+ at itemize @bullet
+ at item uint32_t vget_lane_u32 (uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u32 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16_t vget_lane_u16 (uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u16 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8_t vget_lane_u8 (uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u8 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32_t vget_lane_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.s32 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16_t vget_lane_s16 (int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.s16 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8_t vget_lane_s8 (int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.s8 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32_t vget_lane_f32 (float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.f32 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16_t vget_lane_p16 (poly16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u16 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8_t vget_lane_p8 (poly8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u8 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64_t vget_lane_u64 (uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{r0}, @var{r0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64_t vget_lane_s64 (int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{r0}, @var{r0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32_t vgetq_lane_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u32 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16_t vgetq_lane_u16 (uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u16 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8_t vgetq_lane_u8 (uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u8 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32_t vgetq_lane_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.s32 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16_t vgetq_lane_s16 (int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.s16 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8_t vgetq_lane_s8 (int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.s8 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32_t vgetq_lane_f32 (float32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.f32 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16_t vgetq_lane_p16 (poly16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u16 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8_t vgetq_lane_p8 (poly8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.u8 @var{r0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64_t vgetq_lane_u64 (uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{r0}, @var{r0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64_t vgetq_lane_s64 (int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{r0}, @var{r0}, @var{d0}}
+ at end itemize
+
+
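+A short sketch (the lane number must be a compile-time constant in
+range for the vector type):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Extract lane 0 of a four-element float vector.  */
+float32_t
+first_lane (float32x4_t v)
+@{
+  return vgetq_lane_f32 (v, 0);
+@}
+@end smallexample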
+
+
+ at subsubsection Set lanes in a vector
+
+ at itemize @bullet
+ at item uint32x2_t vset_lane_u32 (uint32_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.32 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vset_lane_u16 (uint16_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.16 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vset_lane_u8 (uint8_t, uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.8 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vset_lane_s32 (int32_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.32 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vset_lane_s16 (int16_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.16 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vset_lane_s8 (int8_t, int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.8 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vset_lane_f32 (float32_t, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.32 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vset_lane_p16 (poly16_t, poly16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.16 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vset_lane_p8 (poly8_t, poly8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.8 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vset_lane_u64 (uint64_t, uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vset_lane_s64 (int64_t, int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vsetq_lane_u32 (uint32_t, uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.32 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vsetq_lane_u16 (uint16_t, uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.16 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vsetq_lane_u8 (uint8_t, uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.8 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vsetq_lane_s32 (int32_t, int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.32 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vsetq_lane_s16 (int16_t, int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.16 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vsetq_lane_s8 (int8_t, int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.8 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vsetq_lane_f32 (float32_t, float32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.32 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vsetq_lane_p16 (poly16_t, poly16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.16 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vsetq_lane_p8 (poly8_t, poly8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov.8 @var{d0}[@var{0}], @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vsetq_lane_u64 (uint64_t, uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vsetq_lane_s64 (int64_t, int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
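+For illustration (note the argument order: value, vector, lane):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Replace lane 3 of v with x and return the updated vector.  */
+int16x8_t
+set_fourth_lane (int16x8_t v, int16_t x)
+@{
+  return vsetq_lane_s16 (x, v, 3);
+@}
+@end smallexample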
+
+
+ at subsubsection Create vector from literal bit pattern
+
+ at itemize @bullet
+ at item uint32x2_t vcreate_u32 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vcreate_u16 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vcreate_u8 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vcreate_s32 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vcreate_s16 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vcreate_s8 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vcreate_u64 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vcreate_s64 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vcreate_f32 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vcreate_p16 (uint64_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vcreate_p8 (uint64_t)
+ at end itemize
+
+
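+A sketch; the lane-numbering comment assumes a little-endian target:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Build an 8-byte vector directly from a 64-bit bit pattern; on a
+   little-endian target lane 0 comes from the least significant byte.  */
+uint8x8_t
+bytes_0_to_7 (void)
+@{
+  return vcreate_u8 (0x0706050403020100ULL);
+@}
+@end smallexample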
+
+
+ at subsubsection Set all lanes to the same value
+
+ at itemize @bullet
+ at item uint32x2_t vdup_n_u32 (uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vdup_n_u16 (uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vdup_n_u8 (uint8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vdup_n_s32 (int32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vdup_n_s16 (int16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vdup_n_s8 (int8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vdup_n_f32 (float32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vdup_n_p16 (poly16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vdup_n_p8 (poly8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vdup_n_u64 (uint64_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vdup_n_s64 (int64_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vdupq_n_u32 (uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vdupq_n_u16 (uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vdupq_n_u8 (uint8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vdupq_n_s32 (int32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vdupq_n_s16 (int16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vdupq_n_s8 (int8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vdupq_n_f32 (float32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vdupq_n_p16 (poly16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vdupq_n_p8 (poly8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vdupq_n_u64 (uint64_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vdupq_n_s64 (int64_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vmov_n_u32 (uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmov_n_u16 (uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vmov_n_u8 (uint8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmov_n_s32 (int32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmov_n_s16 (int16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vmov_n_s8 (int8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vmov_n_f32 (float32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vmov_n_p16 (poly16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vmov_n_p8 (poly8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{d0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vmov_n_u64 (uint64_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vmov_n_s64 (int64_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmovq_n_u32 (uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmovq_n_u16 (uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vmovq_n_u8 (uint8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmovq_n_s32 (int32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmovq_n_s16 (int16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vmovq_n_s8 (int8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmovq_n_f32 (float32_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vmovq_n_p16 (poly16_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vmovq_n_p8 (poly8_t)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{q0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vmovq_n_u64 (uint64_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmovq_n_s64 (int64_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{r0}, @var{r0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vdup_lane_u32 (uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vdup_lane_u16 (uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vdup_lane_u8 (uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vdup_lane_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vdup_lane_s16 (int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vdup_lane_s8 (int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vdup_lane_f32 (float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vdup_lane_p16 (poly16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vdup_lane_p8 (poly8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vdup_lane_u64 (uint64x1_t, const int)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vdup_lane_s64 (int64x1_t, const int)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vdupq_lane_u32 (uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vdupq_lane_u16 (uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vdupq_lane_u8 (uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vdupq_lane_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vdupq_lane_s16 (int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vdupq_lane_s8 (int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vdupq_lane_f32 (float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.32 @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vdupq_lane_p16 (poly16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.16 @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vdupq_lane_p8 (poly8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vdup.8 @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vdupq_lane_u64 (uint64x1_t, const int)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vdupq_lane_s64 (int64x1_t, const int)
+ at end itemize
+
+
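+Two illustrative broadcasts, one from a scalar and one from an
+existing lane:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Broadcast a scalar to all four float lanes.  */
+float32x4_t
+splat_scalar (float32_t x)
+@{
+  return vdupq_n_f32 (x);
+@}
+
+/* Broadcast lane 2 of v to every lane of the result.  */
+int16x4_t
+splat_lane (int16x4_t v)
+@{
+  return vdup_lane_s16 (v, 2);
+@}
+@end smallexample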
+
+
+ at subsubsection Combining vectors
+
+ at itemize @bullet
+ at item uint32x4_t vcombine_u32 (uint32x2_t, uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vcombine_u16 (uint16x4_t, uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vcombine_u8 (uint8x8_t, uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vcombine_s32 (int32x2_t, int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vcombine_s16 (int16x4_t, int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vcombine_s8 (int8x8_t, int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vcombine_u64 (uint64x1_t, uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vcombine_s64 (int64x1_t, int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vcombine_f32 (float32x2_t, float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vcombine_p16 (poly16x4_t, poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vcombine_p8 (poly8x8_t, poly8x8_t)
+ at end itemize
+
+
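+For example:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Concatenate two 64-bit vectors; the first argument becomes the
+   low half of the 128-bit result.  */
+uint8x16_t
+join_halves (uint8x8_t lo, uint8x8_t hi)
+@{
+  return vcombine_u8 (lo, hi);
+@}
+@end smallexample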
+
+
+ at subsubsection Splitting vectors
+
+ at itemize @bullet
+ at item uint32x2_t vget_high_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vget_high_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vget_high_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vget_high_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vget_high_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vget_high_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vget_high_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vget_high_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vget_high_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vget_high_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vget_high_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vget_low_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vget_low_u16 (uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vget_low_u8 (uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vget_low_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vget_low_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vget_low_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vget_low_u64 (uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vget_low_s64 (int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vget_low_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vget_low_p16 (poly16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vget_low_p8 (poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vmov @var{d0}, @var{d0}}
+ at end itemize
+
+
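+The inverse of the combining intrinsics, sketched briefly:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Take the two 64-bit halves of a 128-bit vector apart.  */
+uint8x8_t
+low_half (uint8x16_t v)
+@{
+  return vget_low_u8 (v);
+@}
+
+uint8x8_t
+high_half (uint8x16_t v)
+@{
+  return vget_high_u8 (v);
+@}
+@end smallexample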
+
+
+ at subsubsection Conversions
+
+ at itemize @bullet
+ at item float32x2_t vcvt_f32_u32 (uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcvt.f32.u32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vcvt_f32_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcvt.f32.s32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vcvt_u32_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcvt.u32.f32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vcvt_s32_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vcvt.s32.f32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vcvtq_f32_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcvt.f32.u32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vcvtq_f32_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcvt.f32.s32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcvtq_u32_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcvt.u32.f32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vcvtq_s32_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vcvt.s32.f32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vcvt_n_f32_u32 (uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vcvt.f32.u32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vcvt_n_f32_s32 (int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vcvt.f32.s32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vcvt_n_u32_f32 (float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vcvt.u32.f32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vcvt_n_s32_f32 (float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vcvt.s32.f32 @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vcvtq_n_f32_u32 (uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vcvt.f32.u32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vcvtq_n_f32_s32 (int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vcvt.f32.s32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vcvtq_n_u32_f32 (float32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vcvt.u32.f32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vcvtq_n_s32_f32 (float32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vcvt.s32.f32 @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
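+A sketch of the plain and fixed-point conversions:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Convert four signed integers to floats.  */
+float32x4_t
+int_to_float (int32x4_t v)
+@{
+  return vcvtq_f32_s32 (v);
+@}
+
+/* Fixed-point variant: treat the input as Q16.16, i.e. divide by
+   2^16 during the conversion.  */
+float32x4_t
+q16_to_float (int32x4_t v)
+@{
+  return vcvtq_n_f32_s32 (v, 16);
+@}
+@end smallexample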
+
+
+ at subsubsection Move, single opcode narrowing
+
+ at itemize @bullet
+ at item uint32x2_t vmovn_u64 (uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmovn.i64 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmovn_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmovn.i32 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vmovn_u16 (uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmovn.i16 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmovn_s64 (int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmovn.i64 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmovn_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmovn.i32 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vmovn_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmovn.i16 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqmovn_u64 (uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqmovn.u64 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqmovn_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqmovn.u32 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqmovn_u16 (uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqmovn.u16 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqmovn_s64 (int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqmovn.s64 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqmovn_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqmovn.s32 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vqmovn_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqmovn.s16 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vqmovun_s64 (int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vqmovun.s64 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vqmovun_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vqmovun.s32 @var{d0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vqmovun_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vqmovun.s16 @var{d0}, @var{q0}}
+ at end itemize
+
+
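+For illustration:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Narrow four 32-bit lanes to 16 bits with signed saturation;
+   vmovn_s32 would simply keep the low 16 bits of each lane.  */
+int16x4_t
+narrow_saturating (int32x4_t v)
+@{
+  return vqmovn_s32 (v);
+@}
+@end smallexample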
+
+
+ at subsubsection Move, single opcode long
+
+ at itemize @bullet
+ at item uint64x2_t vmovl_u32 (uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmovl.u32 @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmovl_u16 (uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmovl.u16 @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmovl_u8 (uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmovl.u8 @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmovl_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vmovl.s32 @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmovl_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vmovl.s16 @var{q0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmovl_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vmovl.s8 @var{q0}, @var{d0}}
+ at end itemize
+
+
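+And the widening direction:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Widen eight unsigned bytes to eight 16-bit lanes (zero-extended).  */
+uint16x8_t
+widen_bytes (uint8x8_t v)
+@{
+  return vmovl_u8 (v);
+@}
+@end smallexample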
+
+
+ at subsubsection Table lookup
+
+ at itemize @bullet
+ at item poly8x8_t vtbl1_p8 (poly8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vtbl1_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtbl1_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vtbl2_p8 (poly8x8x2_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}, @var{d1}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vtbl2_s8 (int8x8x2_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}, @var{d1}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtbl2_u8 (uint8x8x2_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}, @var{d1}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vtbl3_p8 (poly8x8x3_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vtbl3_s8 (int8x8x3_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtbl3_u8 (uint8x8x3_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vtbl4_p8 (poly8x8x4_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vtbl4_s8 (int8x8x4_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtbl4_u8 (uint8x8x4_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbl.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, @var{d0}}
+ at end itemize
+
+
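+A sketch of the single-register lookup:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Byte-wise table lookup: each index byte selects a byte of table;
+   indices outside 0-7 produce 0 in the corresponding lane.  */
+uint8x8_t
+lookup (uint8x8_t table, uint8x8_t index)
+@{
+  return vtbl1_u8 (table, index);
+@}
+@end smallexample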
+
+
+ at subsubsection Extended table lookup
+
+ at itemize @bullet
+ at item poly8x8_t vtbx1_p8 (poly8x8_t, poly8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vtbx1_s8 (int8x8_t, int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtbx1_u8 (uint8x8_t, uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vtbx2_p8 (poly8x8_t, poly8x8x2_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}, @var{d1}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vtbx2_s8 (int8x8_t, int8x8x2_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}, @var{d1}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtbx2_u8 (uint8x8_t, uint8x8x2_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}, @var{d1}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vtbx3_p8 (poly8x8_t, poly8x8x3_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vtbx3_s8 (int8x8_t, int8x8x3_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtbx3_u8 (uint8x8_t, uint8x8x3_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vtbx4_p8 (poly8x8_t, poly8x8x4_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vtbx4_s8 (int8x8_t, int8x8x4_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vtbx4_u8 (uint8x8_t, uint8x8x4_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtbx.8 @var{d0}, @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, @var{d0}}
+ at end itemize
+
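A hypothetical sketch (not part of the patch) showing the difference from the plain table lookup above, using the vtbx2_u8 signature from this list:

    #include <arm_neon.h>

    /* Hypothetical sketch: like vtbl2_u8, but lanes whose index byte is out
       of range keep their value from `fallback' instead of being zeroed.  */
    uint8x8_t permute_bytes_with_default(uint8x8_t fallback,
                                         uint8x8x2_t table,
                                         uint8x8_t indices)
    {
        return vtbx2_u8(fallback, table, indices);
    }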
+
+
+
+@subsubsection Multiply, lane
+
+ at itemize @bullet
+ at item float32x2_t vmul_lane_f32 (float32x2_t, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.f32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vmul_lane_u32 (uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmul_lane_u16 (uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmul_lane_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmul_lane_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmulq_lane_f32 (float32x4_t, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.f32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmulq_lane_u32 (uint32x4_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmulq_lane_u16 (uint16x8_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmulq_lane_s32 (int32x4_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmulq_lane_s16 (int16x8_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
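An illustrative C sketch (not from the patch, helper name invented) of the by-lane multiply intrinsics above; note that the lane number must be a compile-time constant:

    #include <arm_neon.h>

    /* Hypothetical sketch: scale four floats by lane 1 of a two-lane
       coefficient vector, matching the vmul.f32 q,q,d[lane] form.  */
    float32x4_t scale_by_lane(float32x4_t v, float32x2_t scales)
    {
        return vmulq_lane_f32(v, scales, 1);
    }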
+
+
+
+@subsubsection Long multiply, lane
+
+ at itemize @bullet
+ at item uint64x2_t vmull_lane_u32 (uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmull.u32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmull_lane_u16 (uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmull.u16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmull_lane_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmull.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmull_lane_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmull.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
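A brief hypothetical use of vmull_lane_s16 from the list above (not part of the patch); the widening result avoids 16-bit overflow:

    #include <arm_neon.h>

    /* Hypothetical sketch: widening multiply of four 16-bit lanes by lane 0
       of `coeffs', producing 32-bit products.  */
    int32x4_t widen_scale(int16x4_t samples, int16x4_t coeffs)
    {
        return vmull_lane_s16(samples, coeffs, 0);
    }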
+
+
+
+@subsubsection Saturating doubling long multiply, lane
+
+ at itemize @bullet
+ at item int64x2_t vqdmull_lane_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmull.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmull_lane_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmull.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+
+
+@subsubsection Saturating doubling multiply high, lane
+
+ at itemize @bullet
+ at item int32x4_t vqdmulhq_lane_s32 (int32x4_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqdmulhq_lane_s16 (int16x8_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqdmulh_lane_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqdmulh_lane_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqrdmulhq_lane_s32 (int32x4_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqrdmulhq_lane_s16 (int16x8_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqrdmulh_lane_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqrdmulh_lane_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
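As a rough illustration (not part of the patch) of the doubling-multiply-high intrinsics above in their usual fixed-point role:

    #include <arm_neon.h>

    /* Hypothetical sketch: Q15 fixed-point multiply of four lanes by lane 2
       of `gains'.  The doubling-high result stays in Q15 format and
       saturates on overflow.  */
    int16x4_t q15_scale(int16x4_t x, int16x4_t gains)
    {
        return vqdmulh_lane_s16(x, gains, 2);
    }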
+
+
+
+@subsubsection Multiply-accumulate, lane
+
+ at itemize @bullet
+ at item float32x2_t vmla_lane_f32 (float32x2_t, float32x2_t, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.f32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vmla_lane_u32 (uint32x2_t, uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmla_lane_u16 (uint16x4_t, uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmla_lane_s32 (int32x2_t, int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmla_lane_s16 (int16x4_t, int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmlaq_lane_f32 (float32x4_t, float32x4_t, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.f32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlaq_lane_u32 (uint32x4_t, uint32x4_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmlaq_lane_u16 (uint16x8_t, uint16x8_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlaq_lane_s32 (int32x4_t, int32x4_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmlaq_lane_s16 (int16x8_t, int16x8_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vmlal_lane_u32 (uint64x2_t, uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmlal.u32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlal_lane_u16 (uint32x4_t, uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmlal.u16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmlal_lane_s32 (int64x2_t, int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmlal.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlal_lane_s16 (int32x4_t, int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmlal.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqdmlal_lane_s32 (int64x2_t, int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmlal.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmlal_lane_s16 (int32x4_t, int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmlal.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
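A hypothetical sketch (not from the patch) of the by-lane multiply-accumulate form listed above, as it might appear in the inner step of a small filter kernel:

    #include <arm_neon.h>

    /* Hypothetical sketch: acc += x * coeffs[0] across four float lanes.  */
    float32x4_t fma_lane(float32x4_t acc, float32x4_t x, float32x2_t coeffs)
    {
        return vmlaq_lane_f32(acc, x, coeffs, 0);
    }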
+
+
+
+@subsubsection Multiply-subtract, lane
+
+ at itemize @bullet
+ at item float32x2_t vmls_lane_f32 (float32x2_t, float32x2_t, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.f32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vmls_lane_u32 (uint32x2_t, uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmls_lane_u16 (uint16x4_t, uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmls_lane_s32 (int32x2_t, int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmls_lane_s16 (int16x4_t, int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmlsq_lane_f32 (float32x4_t, float32x4_t, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.f32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlsq_lane_u32 (uint32x4_t, uint32x4_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmlsq_lane_u16 (uint16x8_t, uint16x8_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlsq_lane_s32 (int32x4_t, int32x4_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmlsq_lane_s16 (int16x8_t, int16x8_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vmlsl_lane_u32 (uint64x2_t, uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.u32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlsl_lane_u16 (uint32x4_t, uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.u16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmlsl_lane_s32 (int64x2_t, int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlsl_lane_s16 (int32x4_t, int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqdmlsl_lane_s32 (int64x2_t, int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmlsl.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmlsl_lane_s16 (int32x4_t, int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vqdmlsl.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
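A hypothetical widening multiply-subtract sketch (not part of the patch) using the vmlsl_lane_s16 signature listed above:

    #include <arm_neon.h>

    /* Hypothetical sketch: acc -= (int32_t) x * coeffs[3], the subtracting
       counterpart of the accumulate example in the previous section.  */
    int32x4_t fms_long_lane(int32x4_t acc, int16x4_t x, int16x4_t coeffs)
    {
        return vmlsl_lane_s16(acc, x, coeffs, 3);
    }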
+
+
+
+@subsubsection Vector multiply by scalar
+
+ at itemize @bullet
+ at item float32x2_t vmul_n_f32 (float32x2_t, float32_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.f32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vmul_n_u32 (uint32x2_t, uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmul_n_u16 (uint16x4_t, uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmul_n_s32 (int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmul_n_s16 (int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmulq_n_f32 (float32x4_t, float32_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.f32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmulq_n_u32 (uint32x4_t, uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmulq_n_u16 (uint16x8_t, uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmulq_n_s32 (int32x4_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmulq_n_s16 (int16x8_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vmul.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
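An illustrative sketch (not part of the patch) of the by-scalar multiply above; the scalar is broadcast via a D-register lane, which is why the expected instruction form shows a d0[0] operand:

    #include <arm_neon.h>

    /* Hypothetical sketch: multiply every lane of `v' by the scalar `s'.  */
    float32x4_t scale_all(float32x4_t v, float32_t s)
    {
        return vmulq_n_f32(v, s);
    }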
+
+
+
+@subsubsection Vector long multiply by scalar
+
+ at itemize @bullet
+ at item uint64x2_t vmull_n_u32 (uint32x2_t, uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.u32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmull_n_u16 (uint16x4_t, uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.u16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmull_n_s32 (int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmull_n_s16 (int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vmull.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+
+
+@subsubsection Vector saturating doubling long multiply by scalar
+
+ at itemize @bullet
+ at item int64x2_t vqdmull_n_s32 (int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmull.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmull_n_s16 (int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmull.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+
+
+@subsubsection Vector saturating doubling multiply high by scalar
+
+ at itemize @bullet
+ at item int32x4_t vqdmulhq_n_s32 (int32x4_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqdmulhq_n_s16 (int16x8_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqdmulh_n_s32 (int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqdmulh_n_s16 (int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmulh.s16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqrdmulhq_n_s32 (int32x4_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vqrdmulhq_n_s16 (int16x8_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vqrdmulh_n_s32 (int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vqrdmulh_n_s16 (int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vqrdmulh.s16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+
+
+@subsubsection Vector multiply-accumulate by scalar
+
+ at itemize @bullet
+ at item float32x2_t vmla_n_f32 (float32x2_t, float32x2_t, float32_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.f32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vmla_n_u32 (uint32x2_t, uint32x2_t, uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmla_n_u16 (uint16x4_t, uint16x4_t, uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmla_n_s32 (int32x2_t, int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmla_n_s16 (int16x4_t, int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmlaq_n_f32 (float32x4_t, float32x4_t, float32_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.f32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlaq_n_u32 (uint32x4_t, uint32x4_t, uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmlaq_n_u16 (uint16x8_t, uint16x8_t, uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlaq_n_s32 (int32x4_t, int32x4_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmlaq_n_s16 (int16x8_t, int16x8_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vmla.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vmlal_n_u32 (uint64x2_t, uint32x2_t, uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.u32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlal_n_u16 (uint32x4_t, uint16x4_t, uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.u16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmlal_n_s32 (int64x2_t, int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlal_n_s16 (int32x4_t, int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vmlal.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqdmlal_n_s32 (int64x2_t, int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmlal.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmlal_n_s16 (int32x4_t, int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmlal.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
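A small hypothetical AXPY-style loop (not part of the patch) built from vmlaq_n_f32 above together with the companion vld1q_f32/vst1q_f32 load/store intrinsics; it assumes n is a multiple of 4:

    #include <arm_neon.h>

    /* Hypothetical sketch: y[i] += a * x[i], four lanes per iteration.  */
    void axpy(float32_t *y, const float32_t *x, float32_t a, int n)
    {
        int i;
        for (i = 0; i < n; i += 4) {
            float32x4_t yv = vld1q_f32(y + i);
            float32x4_t xv = vld1q_f32(x + i);
            yv = vmlaq_n_f32(yv, xv, a);
            vst1q_f32(y + i, yv);
        }
    }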
+
+
+
+@subsubsection Vector multiply-subtract by scalar
+
+ at itemize @bullet
+ at item float32x2_t vmls_n_f32 (float32x2_t, float32x2_t, float32_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.f32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vmls_n_u32 (uint32x2_t, uint32x2_t, uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vmls_n_u16 (uint16x4_t, uint16x4_t, uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vmls_n_s32 (int32x2_t, int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vmls_n_s16 (int16x4_t, int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{d0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vmlsq_n_f32 (float32x4_t, float32x4_t, float32_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.f32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlsq_n_u32 (uint32x4_t, uint32x4_t, uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vmlsq_n_u16 (uint16x8_t, uint16x8_t, uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlsq_n_s32 (int32x4_t, int32x4_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i32 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vmlsq_n_s16 (int16x8_t, int16x8_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vmls.i16 @var{q0}, @var{q0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vmlsl_n_u32 (uint64x2_t, uint32x2_t, uint32_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.u32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vmlsl_n_u16 (uint32x4_t, uint16x4_t, uint16_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.u16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vmlsl_n_s32 (int64x2_t, int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vmlsl_n_s16 (int32x4_t, int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vmlsl.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vqdmlsl_n_s32 (int64x2_t, int32x2_t, int32_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmlsl.s32 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vqdmlsl_n_s16 (int32x4_t, int16x4_t, int16_t)
+@*@emph{Form of expected instruction(s):} @code{vqdmlsl.s16 @var{q0}, @var{d0}, @var{d0}[@var{0}]}
+ at end itemize
+
+
+
+
+@subsubsection Vector extract
+
+ at itemize @bullet
+ at item uint32x2_t vext_u32 (uint32x2_t, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.32 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vext_u16 (uint16x4_t, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.16 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vext_u8 (uint8x8_t, uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.8 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vext_s32 (int32x2_t, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.32 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vext_s16 (int16x4_t, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.16 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vext_s8 (int8x8_t, int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.8 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vext_u64 (uint64x1_t, uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.64 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vext_s64 (int64x1_t, int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.64 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vext_f32 (float32x2_t, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.32 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vext_p16 (poly16x4_t, poly16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.16 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vext_p8 (poly8x8_t, poly8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.8 @var{d0}, @var{d0}, @var{d0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vextq_u32 (uint32x4_t, uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.32 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vextq_u16 (uint16x8_t, uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.16 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vextq_u8 (uint8x16_t, uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.8 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vextq_s32 (int32x4_t, int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.32 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vextq_s16 (int16x8_t, int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.16 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vextq_s8 (int8x16_t, int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.8 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vextq_u64 (uint64x2_t, uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.64 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vextq_s64 (int64x2_t, int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.64 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vextq_f32 (float32x4_t, float32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.32 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vextq_p16 (poly16x8_t, poly16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.16 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vextq_p8 (poly8x16_t, poly8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vext.8 @var{q0}, @var{q0}, @var{q0}, #@var{0}}
+ at end itemize
+
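A hypothetical sketch (not from the patch) of the vector-extract intrinsics above in their common realignment role:

    #include <arm_neon.h>

    /* Hypothetical sketch: take a 16-byte window that starts 3 bytes into
       the concatenation of `lo' and `hi', e.g. to realign data that was
       loaded on a 16-byte boundary.  The byte offset must be constant.  */
    uint8x16_t shift_window(uint8x16_t lo, uint8x16_t hi)
    {
        return vextq_u8(lo, hi, 3);
    }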
+
+
+
+@subsubsection Reverse elements
+
+ at itemize @bullet
+ at item uint32x2_t vrev64_u32 (uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vrev64_u16 (uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vrev64_u8 (uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vrev64_s32 (int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vrev64_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vrev64_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vrev64_f32 (float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.32 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vrev64_p16 (poly16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vrev64_p8 (poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vrev64q_u32 (uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vrev64q_u16 (uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vrev64q_u8 (uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vrev64q_s32 (int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vrev64q_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vrev64q_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vrev64q_f32 (float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.32 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vrev64q_p16 (poly16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vrev64q_p8 (poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrev64.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vrev32_u16 (uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vrev32_s16 (int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vrev32_u8 (uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vrev32_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vrev32_p16 (poly16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.16 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vrev32_p8 (poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vrev32q_u16 (uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vrev32q_s16 (int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vrev32q_u8 (uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vrev32q_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vrev32q_p16 (poly16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.16 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vrev32q_p8 (poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrev32.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vrev16_u8 (uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev16.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vrev16_s8 (int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev16.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vrev16_p8 (poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vrev16.8 @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vrev16q_u8 (uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrev16.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vrev16q_s8 (int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrev16.8 @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vrev16q_p8 (poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vrev16.8 @var{q0}, @var{q0}}
+ at end itemize
+
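An illustrative use (not part of the patch) of one of the reversal intrinsics listed above; the helper name is invented:

    #include <arm_neon.h>

    /* Hypothetical sketch: reverse the bytes within each 32-bit word, i.e.
       byte-swap four words at once for an endianness conversion.  */
    uint8x16_t bswap32x4(uint8x16_t words_as_bytes)
    {
        return vrev32q_u8(words_as_bytes);
    }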
+
+
+
+@subsubsection Bit selection
+
+ at itemize @bullet
+ at item uint32x2_t vbsl_u32 (uint32x2_t, uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vbsl_u16 (uint16x4_t, uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vbsl_u8 (uint8x8_t, uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vbsl_s32 (uint32x2_t, int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vbsl_s16 (uint16x4_t, int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vbsl_s8 (uint8x8_t, int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vbsl_u64 (uint64x1_t, uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vbsl_s64 (uint64x1_t, int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vbsl_f32 (uint32x2_t, float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vbsl_p16 (uint16x4_t, poly16x4_t, poly16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vbsl_p8 (uint8x8_t, poly8x8_t, poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbit @var{d0}, @var{d0}, @var{d0}} @emph{or} @code{vbif @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vbslq_u32 (uint32x4_t, uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vbslq_u16 (uint16x8_t, uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vbslq_u8 (uint8x16_t, uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vbslq_s32 (uint32x4_t, int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vbslq_s16 (uint16x8_t, int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vbslq_s8 (uint8x16_t, int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vbslq_u64 (uint64x2_t, uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vbslq_s64 (uint64x2_t, int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vbslq_f32 (uint32x4_t, float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vbslq_p16 (uint16x8_t, poly16x8_t, poly16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vbslq_p8 (uint8x16_t, poly8x16_t, poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vbsl @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbit @var{q0}, @var{q0}, @var{q0}} @emph{or} @code{vbif @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
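A hypothetical branch-free select sketch (not part of the patch) showing how the bit-selection intrinsics above are typically paired with a comparison such as vcgtq_f32:

    #include <arm_neon.h>

    /* Hypothetical sketch: per-lane maximum without branches.  The compare
       builds an all-ones/all-zeros mask per lane; vbslq_f32 takes bits from
       `a' where the mask is set and from `b' elsewhere.  (vmaxq_f32 would do
       this directly; the point here is the select idiom.)  */
    float32x4_t select_max(float32x4_t a, float32x4_t b)
    {
        uint32x4_t a_greater = vcgtq_f32(a, b);
        return vbslq_f32(a_greater, a, b);
    }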
+
+
+
+@subsubsection Transpose elements
+
+ at itemize @bullet
+ at item uint32x2x2_t vtrn_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x2_t vtrn_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x2_t vtrn_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x2_t vtrn_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x2_t vtrn_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x2_t vtrn_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x2_t vtrn_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x2_t vtrn_p16 (poly16x4_t, poly16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x2_t vtrn_p8 (poly8x8_t, poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4x2_t vtrnq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8x2_t vtrnq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16x2_t vtrnq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4x2_t vtrnq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8x2_t vtrnq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16x2_t vtrnq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4x2_t vtrnq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.32 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8x2_t vtrnq_p16 (poly16x8_t, poly16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.16 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16x2_t vtrnq_p8 (poly8x16_t, poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vtrn.8 @var{q0}, @var{q1}}
+ at end itemize
+
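An illustrative sketch (not from the patch) of the two-result transpose intrinsics above; the .val[] fields come from the uint32x4x2_t return type:

    #include <arm_neon.h>

    /* Hypothetical sketch: one step of a matrix transpose.  result.val[0]
       interleaves the even-indexed lanes of both inputs, result.val[1] the
       odd-indexed lanes.  */
    uint32x4x2_t transpose_step(uint32x4_t row0, uint32x4_t row1)
    {
        return vtrnq_u32(row0, row1);
    }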
+
+
+
+@subsubsection Zip elements
+
+ at itemize @bullet
+ at item uint32x2x2_t vzip_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x2_t vzip_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x2_t vzip_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x2_t vzip_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x2_t vzip_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x2_t vzip_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x2_t vzip_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x2_t vzip_p16 (poly16x4_t, poly16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x2_t vzip_p8 (poly8x8_t, poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4x2_t vzipq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8x2_t vzipq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16x2_t vzipq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4x2_t vzipq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8x2_t vzipq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16x2_t vzipq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4x2_t vzipq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.32 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8x2_t vzipq_p16 (poly16x8_t, poly16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.16 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16x2_t vzipq_p8 (poly8x16_t, poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vzip.8 @var{q0}, @var{q1}}
+ at end itemize
+
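A hypothetical sketch (not part of the patch) of the interleaving intrinsics above, e.g. for merging two separate byte planes:

    #include <arm_neon.h>

    /* Hypothetical sketch: interleave two 8-byte planes into alternating
       bytes a0 b0 a1 b1 ...; val[0] holds the low half of the interleaved
       sequence and val[1] the high half.  */
    uint8x8x2_t interleave(uint8x8_t a, uint8x8_t b)
    {
        return vzip_u8(a, b);
    }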
+
+
+
+@subsubsection Unzip elements
+
+ at itemize @bullet
+ at item uint32x2x2_t vuzp_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x2_t vuzp_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.16 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x2_t vuzp_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.8 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x2_t vuzp_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x2_t vuzp_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.16 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x2_t vuzp_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.8 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x2_t vuzp_f32 (float32x2_t, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x2_t vuzp_p16 (poly16x4_t, poly16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.16 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x2_t vuzp_p8 (poly8x8_t, poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.8 @var{d0}, @var{d1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4x2_t vuzpq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8x2_t vuzpq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.16 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16x2_t vuzpq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.8 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4x2_t vuzpq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8x2_t vuzpq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.16 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16x2_t vuzpq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.8 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4x2_t vuzpq_f32 (float32x4_t, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.32 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8x2_t vuzpq_p16 (poly16x8_t, poly16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.16 @var{q0}, @var{q1}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16x2_t vuzpq_p8 (poly8x16_t, poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vuzp.8 @var{q0}, @var{q1}}
+ at end itemize
+
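A matching hypothetical sketch (not part of the patch) of the de-interleaving intrinsics above, the inverse of the zip example in the previous section:

    #include <arm_neon.h>

    /* Hypothetical sketch: split an interleaved byte stream back into its
       even-indexed elements (val[0]) and odd-indexed elements (val[1]).  */
    uint8x8x2_t deinterleave(uint8x8_t lo, uint8x8_t hi)
    {
        return vuzp_u8(lo, hi);
    }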
+
+
+
+@subsubsection Element/structure loads, VLD1 variants
+
+ at itemize @bullet
+ at item uint32x2_t vld1_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vld1_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vld1_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vld1_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vld1_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vld1_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vld1_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vld1_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vld1_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vld1_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vld1_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vld1q_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vld1q_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vld1q_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vld1q_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vld1q_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vld1q_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vld1q_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vld1q_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vld1q_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vld1q_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vld1q_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vld1_lane_u32 (const uint32_t *, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vld1_lane_u16 (const uint16_t *, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vld1_lane_u8 (const uint8_t *, uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vld1_lane_s32 (const int32_t *, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vld1_lane_s16 (const int16_t *, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vld1_lane_s8 (const int8_t *, int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vld1_lane_f32 (const float32_t *, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vld1_lane_p16 (const poly16_t *, poly16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vld1_lane_p8 (const poly8_t *, poly8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vld1_lane_u64 (const uint64_t *, uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vld1_lane_s64 (const int64_t *, int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vld1q_lane_u32 (const uint32_t *, uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vld1q_lane_u16 (const uint16_t *, uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vld1q_lane_u8 (const uint8_t *, uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vld1q_lane_s32 (const int32_t *, int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vld1q_lane_s16 (const int16_t *, int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vld1q_lane_s8 (const int8_t *, int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vld1q_lane_f32 (const float32_t *, float32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vld1q_lane_p16 (const poly16_t *, poly16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vld1q_lane_p8 (const poly8_t *, poly8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vld1q_lane_u64 (const uint64_t *, uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vld1q_lane_s64 (const int64_t *, int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vld1_dup_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vld1_dup_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vld1_dup_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vld1_dup_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vld1_dup_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vld1_dup_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vld1_dup_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vld1_dup_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vld1_dup_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vld1_dup_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vld1_dup_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vld1q_dup_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vld1q_dup_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vld1q_dup_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vld1q_dup_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vld1q_dup_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vld1q_dup_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vld1q_dup_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.32 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vld1q_dup_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.16 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vld1q_dup_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.8 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vld1q_dup_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vld1q_dup_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
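+
+A lane load replaces a single element of an existing vector, while a
+@code{_dup} load broadcasts one element to every lane; a minimal sketch
+with hypothetical helper names, assuming @code{<arm_neon.h>}:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Replace lane 0 of V with the value at P; the other lane is kept.  */
+uint32x2_t
+patch_lane0 (const uint32_t *p, uint32x2_t v)
+@{
+  return vld1_lane_u32 (p, v, 0);
+@}
+
+/* Broadcast *P into all four lanes of a Q register.  */
+uint32x4_t
+splat4_u32 (const uint32_t *p)
+@{
+  return vld1q_dup_u32 (p);
+@}
+@end smallexample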
+
+
+
+
+ at subsubsection Element/structure stores, VST1 variants
+
+ at itemize @bullet
+ at item void vst1_u32 (uint32_t *, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_u16 (uint16_t *, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_u8 (uint8_t *, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_s32 (int32_t *, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_s16 (int16_t *, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_s8 (int8_t *, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_u64 (uint64_t *, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_s64 (int64_t *, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_f32 (float32_t *, float32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_p16 (poly16_t *, poly16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_p8 (poly8_t *, poly8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_u32 (uint32_t *, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_u16 (uint16_t *, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_u8 (uint8_t *, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_s32 (int32_t *, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_s16 (int16_t *, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_s8 (int8_t *, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_u64 (uint64_t *, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_s64 (int64_t *, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_f32 (float32_t *, float32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_p16 (poly16_t *, poly16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_p8 (poly8_t *, poly8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_u32 (uint32_t *, uint32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_u16 (uint16_t *, uint16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_u8 (uint8_t *, uint8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_s32 (int32_t *, int32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_s16 (int16_t *, int16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_s8 (int8_t *, int8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_f32 (float32_t *, float32x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_p16 (poly16_t *, poly16x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_p8 (poly8_t *, poly8x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_s64 (int64_t *, int64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1_lane_u64 (uint64_t *, uint64x1_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_u32 (uint32_t *, uint32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_u16 (uint16_t *, uint16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_u8 (uint8_t *, uint8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_s32 (int32_t *, int32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_s16 (int16_t *, int16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_s8 (int8_t *, int8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_f32 (float32_t *, float32x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.32 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_p16 (poly16_t *, poly16x8_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.16 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_p8 (poly8_t *, poly8x16_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.8 @{@var{d0}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_s64 (int64_t *, int64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst1q_lane_u64 (uint64_t *, uint64x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}@}, [@var{r0}]}
+ at end itemize
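+
+Whole-vector and single-lane stores can be mixed on the same buffer; for
+instance (hypothetical helper, illustration only):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Store all four lanes of V at DST, then append a copy of lane 1.
+   DST must have room for five uint16_t values.  */
+void
+store_then_lane (uint16_t *dst, uint16x4_t v)
+@{
+  vst1_u16 (dst, v);               /* whole D register          */
+  vst1_lane_u16 (dst + 4, v, 1);   /* just lane 1, one element  */
+@}
+@end smallexample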
+
+
+
+
+ at subsubsection Element/structure loads, VLD2 variants
+
+ at itemize @bullet
+ at item uint32x2x2_t vld2_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x2_t vld2_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x2_t vld2_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x2_t vld2_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x2_t vld2_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x2_t vld2_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x2_t vld2_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x2_t vld2_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x2_t vld2_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1x2_t vld2_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1x2_t vld2_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4x2_t vld2q_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8x2_t vld2q_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16x2_t vld2q_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4x2_t vld2q_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8x2_t vld2q_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16x2_t vld2q_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4x2_t vld2q_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8x2_t vld2q_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16x2_t vld2q_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2x2_t vld2_lane_u32 (const uint32_t *, uint32x2x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x2_t vld2_lane_u16 (const uint16_t *, uint16x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x2_t vld2_lane_u8 (const uint8_t *, uint8x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x2_t vld2_lane_s32 (const int32_t *, int32x2x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x2_t vld2_lane_s16 (const int16_t *, int16x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x2_t vld2_lane_s8 (const int8_t *, int8x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x2_t vld2_lane_f32 (const float32_t *, float32x2x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x2_t vld2_lane_p16 (const poly16_t *, poly16x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x2_t vld2_lane_p8 (const poly8_t *, poly8x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4x2_t vld2q_lane_s32 (const int32_t *, int32x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8x2_t vld2q_lane_s16 (const int16_t *, int16x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4x2_t vld2q_lane_u32 (const uint32_t *, uint32x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8x2_t vld2q_lane_u16 (const uint16_t *, uint16x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4x2_t vld2q_lane_f32 (const float32_t *, float32x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8x2_t vld2q_lane_p16 (const poly16_t *, poly16x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2x2_t vld2_dup_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x2_t vld2_dup_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x2_t vld2_dup_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x2_t vld2_dup_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x2_t vld2_dup_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x2_t vld2_dup_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x2_t vld2_dup_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.32 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x2_t vld2_dup_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.16 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x2_t vld2_dup_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld2.8 @{@var{d0}[], @var{d1}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1x2_t vld2_dup_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1x2_t vld2_dup_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
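+
+The two-way structure loads de-interleave their input: @code{val[0]}
+receives elements 0, 2, 4, @dots{} and @code{val[1]} receives elements
+1, 3, 5, @dots{}  A possible use, with a hypothetical helper:
+
+@smallexample
+#include <arm_neon.h>
+
+/* Split 8 interleaved stereo frames (L R L R ...) into separate
+   left/right vectors of 8 samples each.  FRAMES holds 16 int16_t.  */
+void
+split_stereo (int16_t *left, int16_t *right, const int16_t *frames)
+@{
+  int16x8x2_t lr = vld2q_s16 (frames);   /* de-interleaving load   */
+  vst1q_s16 (left,  lr.val[0]);          /* even elements: left    */
+  vst1q_s16 (right, lr.val[1]);          /* odd elements:  right   */
+@}
+@end smallexample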
+
+
+
+
+ at subsubsection Element/structure stores, VST2 variants
+
+ at itemize @bullet
+ at item void vst2_u32 (uint32_t *, uint32x2x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_u16 (uint16_t *, uint16x4x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_u8 (uint8_t *, uint8x8x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_s32 (int32_t *, int32x2x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_s16 (int16_t *, int16x4x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_s8 (int8_t *, int8x8x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_f32 (float32_t *, float32x2x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_p16 (poly16_t *, poly16x4x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_p8 (poly8_t *, poly8x8x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_u64 (uint64_t *, uint64x1x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_s64 (int64_t *, int64x1x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_u32 (uint32_t *, uint32x4x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_u16 (uint16_t *, uint16x8x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_u8 (uint8_t *, uint8x16x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_s32 (int32_t *, int32x4x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_s16 (int16_t *, int16x8x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_s8 (int8_t *, int8x16x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_f32 (float32_t *, float32x4x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_p16 (poly16_t *, poly16x8x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_p8 (poly8_t *, poly8x16x2_t)
+@*@emph{Form of expected instruction(s):} @code{vst2.8 @{@var{d0}, @var{d1}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_lane_u32 (uint32_t *, uint32x2x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_lane_u16 (uint16_t *, uint16x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_lane_u8 (uint8_t *, uint8x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_lane_s32 (int32_t *, int32x2x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_lane_s16 (int16_t *, int16x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_lane_s8 (int8_t *, int8x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_lane_f32 (float32_t *, float32x2x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_lane_p16 (poly16_t *, poly16x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2_lane_p8 (poly8_t *, poly8x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_lane_s32 (int32_t *, int32x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_lane_s16 (int16_t *, int16x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_lane_u32 (uint32_t *, uint32x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_lane_u16 (uint16_t *, uint16x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_lane_f32 (float32_t *, float32x4x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst2q_lane_p16 (poly16_t *, poly16x8x2_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst2.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}]@}, [@var{r0}]}
+ at end itemize
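+
+The two-way structure stores perform the inverse interleaving of the VLD2
+loads above; for example (hypothetical helper, illustration only):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Interleave separate left/right sample vectors back into
+   L R L R ... order.  FRAMES must have room for 16 int16_t.  */
+void
+merge_stereo (int16_t *frames, int16x8_t left, int16x8_t right)
+@{
+  int16x8x2_t lr;
+  lr.val[0] = left;
+  lr.val[1] = right;
+  vst2q_s16 (frames, lr);   /* interleaving store  */
+@}
+@end smallexample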
+
+
+
+
+ at subsubsection Element/structure loads, VLD3 variants
+
+ at itemize @bullet
+ at item uint32x2x3_t vld3_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x3_t vld3_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x3_t vld3_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x3_t vld3_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x3_t vld3_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x3_t vld3_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x3_t vld3_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x3_t vld3_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x3_t vld3_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1x3_t vld3_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1x3_t vld3_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4x3_t vld3q_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8x3_t vld3q_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16x3_t vld3q_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4x3_t vld3q_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8x3_t vld3q_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16x3_t vld3q_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4x3_t vld3q_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8x3_t vld3q_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16x3_t vld3q_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2x3_t vld3_lane_u32 (const uint32_t *, uint32x2x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x3_t vld3_lane_u16 (const uint16_t *, uint16x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x3_t vld3_lane_u8 (const uint8_t *, uint8x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x3_t vld3_lane_s32 (const int32_t *, int32x2x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x3_t vld3_lane_s16 (const int16_t *, int16x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x3_t vld3_lane_s8 (const int8_t *, int8x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x3_t vld3_lane_f32 (const float32_t *, float32x2x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x3_t vld3_lane_p16 (const poly16_t *, poly16x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x3_t vld3_lane_p8 (const poly8_t *, poly8x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4x3_t vld3q_lane_s32 (const int32_t *, int32x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8x3_t vld3q_lane_s16 (const int16_t *, int16x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4x3_t vld3q_lane_u32 (const uint32_t *, uint32x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8x3_t vld3q_lane_u16 (const uint16_t *, uint16x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4x3_t vld3q_lane_f32 (const float32_t *, float32x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8x3_t vld3q_lane_p16 (const poly16_t *, poly16x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2x3_t vld3_dup_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}[], @var{d1}[], @var{d2}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x3_t vld3_dup_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}[], @var{d1}[], @var{d2}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x3_t vld3_dup_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}[], @var{d1}[], @var{d2}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x3_t vld3_dup_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}[], @var{d1}[], @var{d2}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x3_t vld3_dup_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}[], @var{d1}[], @var{d2}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x3_t vld3_dup_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}[], @var{d1}[], @var{d2}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x3_t vld3_dup_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.32 @{@var{d0}[], @var{d1}[], @var{d2}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x3_t vld3_dup_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.16 @{@var{d0}[], @var{d1}[], @var{d2}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x3_t vld3_dup_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld3.8 @{@var{d0}[], @var{d1}[], @var{d2}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1x3_t vld3_dup_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1x3_t vld3_dup_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
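+
+A three-way structure load separates packed RGB data into one vector per
+channel, as in this illustrative sketch (hypothetical helper):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Compute a rough luma value (R + 2*G + B) / 4 for 8 packed RGB pixels.
+   RGB holds 24 bytes; OUT receives 8 bytes.  */
+void
+luma8 (uint8_t *out, const uint8_t *rgb)
+@{
+  uint8x8x3_t px = vld3_u8 (rgb);                     /* split R/G/B       */
+  uint16x8_t sum = vaddl_u8 (px.val[0], px.val[2]);   /* R + B, widened    */
+  sum = vaddq_u16 (sum, vshll_n_u8 (px.val[1], 1));   /* + 2 * G           */
+  vst1_u8 (out, vshrn_n_u16 (sum, 2));                /* / 4, narrow back  */
+@}
+@end smallexample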
+
+
+
+
+ at subsubsection Element/structure stores, VST3 variants
+
+ at itemize @bullet
+ at item void vst3_u32 (uint32_t *, uint32x2x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_u16 (uint16_t *, uint16x4x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_u8 (uint8_t *, uint8x8x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_s32 (int32_t *, int32x2x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_s16 (int16_t *, int16x4x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_s8 (int8_t *, int8x8x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_f32 (float32_t *, float32x2x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_p16 (poly16_t *, poly16x4x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_p8 (poly8_t *, poly8x8x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_u64 (uint64_t *, uint64x1x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_s64 (int64_t *, int64x1x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_u32 (uint32_t *, uint32x4x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_u16 (uint16_t *, uint16x8x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_u8 (uint8_t *, uint8x16x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_s32 (int32_t *, int32x4x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_s16 (int16_t *, int16x8x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_s8 (int8_t *, int8x16x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_f32 (float32_t *, float32x4x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_p16 (poly16_t *, poly16x8x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_p8 (poly8_t *, poly8x16x3_t)
+@*@emph{Form of expected instruction(s):} @code{vst3.8 @{@var{d0}, @var{d1}, @var{d2}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_lane_u32 (uint32_t *, uint32x2x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_lane_u16 (uint16_t *, uint16x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_lane_u8 (uint8_t *, uint8x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_lane_s32 (int32_t *, int32x2x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_lane_s16 (int16_t *, int16x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_lane_s8 (int8_t *, int8x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_lane_f32 (float32_t *, float32x2x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_lane_p16 (poly16_t *, poly16x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3_lane_p8 (poly8_t *, poly8x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_lane_s32 (int32_t *, int32x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_lane_s16 (int16_t *, int16x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_lane_u32 (uint32_t *, uint32x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_lane_u16 (uint16_t *, uint16x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_lane_f32 (float32_t *, float32x4x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst3q_lane_p16 (poly16_t *, poly16x8x3_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst3.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}]@}, [@var{r0}]}
+ at end itemize
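+
+A three-way structure store writes its three source vectors back in
+interleaved order; an illustrative sketch (hypothetical helper):
+
+@smallexample
+#include <arm_neon.h>
+
+/* Fill 8 packed RGB pixels (24 bytes at RGB) with a constant colour.  */
+void
+fill_rgb8 (uint8_t *rgb, uint8_t r, uint8_t g, uint8_t b)
+@{
+  uint8x8x3_t px;
+  px.val[0] = vdup_n_u8 (r);   /* red channel             */
+  px.val[1] = vdup_n_u8 (g);   /* green channel           */
+  px.val[2] = vdup_n_u8 (b);   /* blue channel            */
+  vst3_u8 (rgb, px);           /* interleave and store    */
+@}
+@end smallexample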
+
+
+
+
+ at subsubsection Element/structure loads, VLD4 variants
+
+ at itemize @bullet
+ at item uint32x2x4_t vld4_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x4_t vld4_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x4_t vld4_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x4_t vld4_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x4_t vld4_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x4_t vld4_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x4_t vld4_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x4_t vld4_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x4_t vld4_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1x4_t vld4_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1x4_t vld4_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4x4_t vld4q_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8x4_t vld4q_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16x4_t vld4q_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4x4_t vld4q_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8x4_t vld4q_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16x4_t vld4q_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4x4_t vld4q_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8x4_t vld4q_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16x4_t vld4q_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2x4_t vld4_lane_u32 (const uint32_t *, uint32x2x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x4_t vld4_lane_u16 (const uint16_t *, uint16x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x4_t vld4_lane_u8 (const uint8_t *, uint8x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x4_t vld4_lane_s32 (const int32_t *, int32x2x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x4_t vld4_lane_s16 (const int16_t *, int16x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x4_t vld4_lane_s8 (const int8_t *, int8x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x4_t vld4_lane_f32 (const float32_t *, float32x2x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x4_t vld4_lane_p16 (const poly16_t *, poly16x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x4_t vld4_lane_p8 (const poly8_t *, poly8x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4x4_t vld4q_lane_s32 (const int32_t *, int32x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8x4_t vld4q_lane_s16 (const int16_t *, int16x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4x4_t vld4q_lane_u32 (const uint32_t *, uint32x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8x4_t vld4q_lane_u16 (const uint16_t *, uint16x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4x4_t vld4q_lane_f32 (const float32_t *, float32x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8x4_t vld4q_lane_p16 (const poly16_t *, poly16x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2x4_t vld4_dup_u32 (const uint32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}[], @var{d1}[], @var{d2}[], @var{d3}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4x4_t vld4_dup_u16 (const uint16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}[], @var{d1}[], @var{d2}[], @var{d3}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8x4_t vld4_dup_u8 (const uint8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}[], @var{d1}[], @var{d2}[], @var{d3}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2x4_t vld4_dup_s32 (const int32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}[], @var{d1}[], @var{d2}[], @var{d3}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4x4_t vld4_dup_s16 (const int16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}[], @var{d1}[], @var{d2}[], @var{d3}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8x4_t vld4_dup_s8 (const int8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}[], @var{d1}[], @var{d2}[], @var{d3}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2x4_t vld4_dup_f32 (const float32_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.32 @{@var{d0}[], @var{d1}[], @var{d2}[], @var{d3}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4x4_t vld4_dup_p16 (const poly16_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.16 @{@var{d0}[], @var{d1}[], @var{d2}[], @var{d3}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8x4_t vld4_dup_p8 (const poly8_t *)
+@*@emph{Form of expected instruction(s):} @code{vld4.8 @{@var{d0}[], @var{d1}[], @var{d2}[], @var{d3}[]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1x4_t vld4_dup_u64 (const uint64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1x4_t vld4_dup_s64 (const int64_t *)
+@*@emph{Form of expected instruction(s):} @code{vld1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
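For illustration, a minimal C sketch of how the VLD4 lane and duplicate loads listed above are typically used (the function and variable names are invented for this example, and it assumes compilation with @option{-mfpu=neon}):

#include <arm_neon.h>

/* Load one 4-byte structure into lane 0 of four existing vectors,
   then broadcast another structure across all lanes.  */
void example_vld4 (const uint8_t *p, uint8x8x4_t *planes)
{
  /* Replace lane 0 of each of the four vectors with the four
     consecutive bytes at p (the vld4.8 {d0[0],...} form).  */
  *planes = vld4_lane_u8 (p, *planes, 0);

  /* Duplicate the four bytes at p across every lane of four
     vectors (the vld4.8 {d0[],d1[],d2[],d3[]} form).  */
  uint8x8x4_t bcast = vld4_dup_u8 (p);
  planes->val[0] = vadd_u8 (planes->val[0], bcast.val[0]);
}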
+
+
+ at subsubsection Element/structure stores, VST4 variants
+
+ at itemize @bullet
+ at item void vst4_u32 (uint32_t *, uint32x2x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_u16 (uint16_t *, uint16x4x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_u8 (uint8_t *, uint8x8x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_s32 (int32_t *, int32x2x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_s16 (int16_t *, int16x4x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_s8 (int8_t *, int8x8x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_f32 (float32_t *, float32x2x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_p16 (poly16_t *, poly16x4x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_p8 (poly8_t *, poly8x8x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_u64 (uint64_t *, uint64x1x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_s64 (int64_t *, int64x1x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst1.64 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_u32 (uint32_t *, uint32x4x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_u16 (uint16_t *, uint16x8x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_u8 (uint8_t *, uint8x16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_s32 (int32_t *, int32x4x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_s16 (int16_t *, int16x8x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_s8 (int8_t *, int8x16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_f32 (float32_t *, float32x4x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_p16 (poly16_t *, poly16x8x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_p8 (poly8_t *, poly8x16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vst4.8 @{@var{d0}, @var{d1}, @var{d2}, @var{d3}@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_lane_u32 (uint32_t *, uint32x2x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_lane_u16 (uint16_t *, uint16x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_lane_u8 (uint8_t *, uint8x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_lane_s32 (int32_t *, int32x2x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_lane_s16 (int16_t *, int16x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_lane_s8 (int8_t *, int8x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_lane_f32 (float32_t *, float32x2x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_lane_p16 (poly16_t *, poly16x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4_lane_p8 (poly8_t *, poly8x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.8 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_lane_s32 (int32_t *, int32x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_lane_s16 (int16_t *, int16x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_lane_u32 (uint32_t *, uint32x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_lane_u16 (uint16_t *, uint16x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_lane_f32 (float32_t *, float32x4x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.32 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
+ at itemize @bullet
+ at item void vst4q_lane_p16 (poly16_t *, poly16x8x4_t, const int)
+@*@emph{Form of expected instruction(s):} @code{vst4.16 @{@var{d0}[@var{0}], @var{d1}[@var{0}], @var{d2}[@var{0}], @var{d3}[@var{0}]@}, [@var{r0}]}
+ at end itemize
+
+
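For illustration, a hedged sketch (names invented, not part of the patch) pairing the VST4 stores listed above with the corresponding VLD4 load, which together copy interleaved data while keeping the four components separated in registers:

#include <arm_neon.h>

/* Copy eight interleaved 4-byte structures (32 bytes) using
   vld4.8 to de-interleave and vst4.8 to re-interleave.  */
void example_vst4 (uint8_t *dst, const uint8_t *src)
{
  uint8x8x4_t t = vld4_u8 (src);
  vst4_u8 (dst, t);
}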
+
+
+ at subsubsection Logical operations (AND)
+
+ at itemize @bullet
+ at item uint32x2_t vand_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vand_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vand_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vand_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vand_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vand_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vand_u64 (uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vand_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vandq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vandq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vandq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vandq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vandq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vandq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vandq_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vandq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vand @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
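For illustration (invented names, assumes @option{-mfpu=neon}), a typical use of the VAND forms listed above as a lane-wise mask:

#include <arm_neon.h>

/* Keep only the low nibble of every byte; the AND maps to the
   vand q-register form shown above.  */
uint8x16_t low_nibbles (uint8x16_t x)
{
  return vandq_u8 (x, vdupq_n_u8 (0x0f));
}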
+
+
+ at subsubsection Logical operations (OR)
+
+ at itemize @bullet
+ at item uint32x2_t vorr_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vorr_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vorr_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vorr_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vorr_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vorr_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vorr_u64 (uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vorr_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vorrq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vorrq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vorrq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vorrq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vorrq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vorrq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vorrq_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vorrq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vorr @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
+ at subsubsection Logical operations (exclusive OR)
+
+ at itemize @bullet
+ at item uint32x2_t veor_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t veor_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t veor_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t veor_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t veor_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t veor_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t veor_u64 (uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t veor_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t veorq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t veorq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t veorq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t veorq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t veorq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t veorq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t veorq_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t veorq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{veor @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
+ at subsubsection Logical operations (AND-NOT)
+
+ at itemize @bullet
+ at item uint32x2_t vbic_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vbic_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vbic_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vbic_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vbic_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vbic_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vbic_u64 (uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vbic_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vbicq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vbicq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vbicq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vbicq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vbicq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vbicq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vbicq_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vbicq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vbic @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+
+
+ at subsubsection Logical operations (OR-NOT)
+
+ at itemize @bullet
+ at item uint32x2_t vorn_u32 (uint32x2_t, uint32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vorn_u16 (uint16x4_t, uint16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vorn_u8 (uint8x8_t, uint8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vorn_s32 (int32x2_t, int32x2_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vorn_s16 (int16x4_t, int16x4_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vorn_s8 (int8x8_t, int8x8_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vorn_u64 (uint64x1_t, uint64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vorn_s64 (int64x1_t, int64x1_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{d0}, @var{d0}, @var{d0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vornq_u32 (uint32x4_t, uint32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vornq_u16 (uint16x8_t, uint16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vornq_u8 (uint8x16_t, uint8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vornq_s32 (int32x4_t, int32x4_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vornq_s16 (int16x8_t, int16x8_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vornq_s8 (int8x16_t, int8x16_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vornq_u64 (uint64x2_t, uint64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vornq_s64 (int64x2_t, int64x2_t)
+@*@emph{Form of expected instruction(s):} @code{vorn @var{q0}, @var{q0}, @var{q0}}
+ at end itemize
+
+
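For illustration, a short sketch of the operand order for the AND-NOT and OR-NOT forms listed above: in both cases it is the second argument that is inverted (the names here are invented):

#include <arm_neon.h>

uint32x2_t clear_masked (uint32x2_t value, uint32x2_t mask)
{
  return vbic_u32 (value, mask);   /* value & ~mask  */
}

uint32x2_t set_unmasked (uint32x2_t value, uint32x2_t mask)
{
  return vorn_u32 (value, mask);   /* value | ~mask  */
}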
+
+
+ at subsubsection Reinterpret casts
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x8_t vreinterpret_p8_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly8x16_t vreinterpretq_p8_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x4_t vreinterpret_p16_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item poly16x8_t vreinterpretq_p16_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x2_t vreinterpret_f32_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item float32x4_t vreinterpretq_f32_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x1_t vreinterpret_s64_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int64x2_t vreinterpretq_s64_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x1_t vreinterpret_u64_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint64x2_t vreinterpretq_u64_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x8_t vreinterpret_s8_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int8x16_t vreinterpretq_s8_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x4_t vreinterpret_s16_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int16x8_t vreinterpretq_s16_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x2_t vreinterpret_s32_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item int32x4_t vreinterpretq_s32_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x8_t vreinterpret_u8_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint8x16_t vreinterpretq_u8_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_u32 (uint32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x4_t vreinterpret_u16_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_u32 (uint32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint16x8_t vreinterpretq_u16_p8 (poly8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_u16 (uint16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_u8 (uint8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_s32 (int32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_s16 (int16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_s8 (int8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_u64 (uint64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_s64 (int64x1_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_f32 (float32x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_p16 (poly16x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x2_t vreinterpret_u32_p8 (poly8x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_u16 (uint16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_u8 (uint8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_s32 (int32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_s16 (int16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_s8 (int8x16_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_u64 (uint64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_s64 (int64x2_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_f32 (float32x4_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_p16 (poly16x8_t)
+ at end itemize
+
+
+ at itemize @bullet
+ at item uint32x4_t vreinterpretq_u32_p8 (poly8x16_t)
+ at end itemize
+
+
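For illustration: the reinterpret casts above only relabel a vector's bits as another element type (note that no instruction form is listed for them), so they combine naturally with the bitwise operations. A hedged sketch with invented names:

#include <arm_neon.h>

/* Clear the sign bit of each lane by viewing the floats as u32,
   i.e. a branch-free absolute value on the raw bits.  */
float32x2_t abs_via_bits (float32x2_t x)
{
  uint32x2_t bits = vreinterpret_u32_f32 (x);
  bits = vbic_u32 (bits, vdup_n_u32 (0x80000000u));
  return vreinterpret_f32_u32 (bits);
}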
+
+
Modified: llvm-gcc-4.2/trunk/gcc/doc/extend.texi
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/doc/extend.texi?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/doc/extend.texi (original)
+++ llvm-gcc-4.2/trunk/gcc/doc/extend.texi Wed Jul 22 15:36:27 2009
@@ -6532,9 +6532,11 @@
to those machines. Generally these generate calls to specific machine
instructions, but allow the compiler to schedule those calls.
+@c APPLE LOCAL begin ARM NEON support. Merge from Codesourcery
@menu
* Alpha Built-in Functions::
* ARM Built-in Functions::
+* ARM NEON Intrinsics::
* Blackfin Built-in Functions::
* FR-V Built-in Functions::
* X86 Built-in Functions::
@@ -6543,6 +6545,7 @@
* PowerPC AltiVec Built-in Functions::
* SPARC VIS Built-in Functions::
@end menu
+@c APPLE LOCAL end ARM NEON support. Merge from Codesourcery
@node Alpha Built-in Functions
@subsection Alpha Built-in Functions
@@ -6773,6 +6776,16 @@
long long __builtin_arm_wzero ()
@end smallexample
+@c APPLE LOCAL begin ARM NEON support
+@node ARM NEON Intrinsics
+@subsection ARM NEON Intrinsics
+
+These built-in intrinsics for the ARM Advanced SIMD extension are available
+when the @option{-mfpu=neon} switch is used:
+
+@include arm-neon-intrinsics.texi
+@c APPLE LOCAL end ARM NEON support. Merge from Codesourcery.
+
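For illustration, a minimal sketch of what the new node documents: including <arm_neon.h> and building with @option{-mfpu=neon} (the tests added below also pass @option{-mfloat-abi=softfp}) makes the intrinsics available. The function name is invented:

#include <arm_neon.h>

int32x4_t add4 (int32x4_t a, int32x4_t b)
{
  return vaddq_s32 (a, b);
}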
@node Blackfin Built-in Functions
@subsection Blackfin Built-in Functions
Modified: llvm-gcc-4.2/trunk/gcc/doc/invoke.texi
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/doc/invoke.texi?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/doc/invoke.texi (original)
+++ llvm-gcc-4.2/trunk/gcc/doc/invoke.texi Wed Jul 22 15:36:27 2009
@@ -1430,8 +1430,8 @@
@c APPLE LOCAL begin CW asm blocks
@item -fasm-blocks
Enable the use of blocks and entire functions of assembly code within
-a C or C++ file. The syntax follows that used in CodeWarrior. (APPLE
-ONLY)
+a C or C++ file. The syntax follows that used in CodeWarrior. This
+option is not supported for ARM targets. (APPLE ONLY)
@c APPLE LOCAL end CW asm blocks
@item -fno-asm
@@ -8511,10 +8511,14 @@
there is a function name embedded immediately preceding this location
and has length @code{((pc[-3]) & 0xff000000)}.
+@c APPLE LOCAL begin v7 thumb is default
@item -mthumb
@opindex mthumb
-Generate code for the 16-bit Thumb instruction set. The default is to
-use the 32-bit ARM instruction set.
+Generate code for the 16-bit Thumb instruction set. For ARMv7, the default
+is to use the THUMB2 instruction set. For all other architectures, the default
+is to use the 32-bit ARM instruction set. The ARM instruction set may be
+explicitly selected via @option{-mno-thumb} or @option{-marm}.
+@c APPLE LOCAL end v7 thumb is default
@item -mtpcs-frame
@opindex mtpcs-frame
Modified: llvm-gcc-4.2/trunk/gcc/ifcvt.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/ifcvt.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/ifcvt.c (original)
+++ llvm-gcc-4.2/trunk/gcc/ifcvt.c Wed Jul 22 15:36:27 2009
@@ -522,13 +522,16 @@
{
basic_block bb = test_bb;
basic_block last_test_bb = ce_info->last_test_bb;
- int mod_ok = 0;
+ /* APPLE LOCAL 6370037 over-aggressive if conversion */
+ /* Line deleted */
if (! false_expr)
goto fail;
do
{
+ /* APPLE LOCAL 6370037 over-aggressive if conversion */
+ int mod_ok = 0;
rtx start, end;
rtx t, f;
enum rtx_code f_code;
Modified: llvm-gcc-4.2/trunk/gcc/libgcov.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/libgcov.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/libgcov.c (original)
+++ llvm-gcc-4.2/trunk/gcc/libgcov.c Wed Jul 22 15:36:27 2009
@@ -35,7 +35,6 @@
#include "tm.h"
/* APPLE LOCAL begin instant off 6414141 */
-/* LLVM LOCAL - not __arm__ */
#if defined(__APPLE__) && !defined(__STATIC__) && !defined(__ppc__) && !defined(__ppc64__) && !defined(__arm__)
#include <vproc.h>
#if defined(VPROC_HAS_TRANSACTIONS)
@@ -161,7 +160,6 @@
}
/* APPLE LOCAL begin instant off 6414141 */
-/* LLVM LOCAL - not __arm__ */
#if defined(__APPLE__) && !defined(__STATIC__) && !defined(__ppc__) && !defined(__ppc64__) && !defined(__arm__)
#if defined(VPROC_HAS_TRANSACTIONS)
static vproc_transaction_t gcov_trans;
Modified: llvm-gcc-4.2/trunk/gcc/local-alloc.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/local-alloc.c?rev=76781&r1=76780&r2=76781&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/local-alloc.c (original)
+++ llvm-gcc-4.2/trunk/gcc/local-alloc.c Wed Jul 22 15:36:27 2009
@@ -380,7 +380,10 @@
/* APPLE LOCAL begin 5695218 */
gcc_assert (!reg_inheritance_matrix);
- if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
+ /* The max_regno check limits the size of the reg_inheritance_matrix
+ to avoid malloc failure. 10033^2 / 8 = 12MB. */
+ if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && max_regno <= 10033)
{
reg_inheritance_matrix = sbitmap_vector_alloc (max_regno, max_regno);
sbitmap_vector_zero (reg_inheritance_matrix, max_regno);
@@ -392,7 +395,7 @@
update_equiv_regs ();
/* APPLE LOCAL begin 5695218 */
- if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
+ if (reg_inheritance_matrix)
{
reg_inheritance ();
sbitmap_vector_free (reg_inheritance_matrix);
@@ -893,7 +896,7 @@
src = SET_SRC (set);
/* APPLE LOCAL begin 5695218 */
- if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
+ if (reg_inheritance_matrix)
{
int dstregno;
if (REG_P (dest))
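For reference, a rough check of the 12MB figure in the comment added above, ignoring per-bitmap headers and word-size rounding (this helper is purely illustrative and not part of the patch):

/* An sbitmap vector of max_regno bitmaps of max_regno bits each takes
   about max_regno * max_regno / 8 bytes; for the 10033 cutoff that is
   10033 * 10033 / 8 = 12,582,636 bytes, roughly 12 MB.  */
unsigned long
reg_inheritance_matrix_bytes (unsigned long max_regno)
{
  return max_regno * max_regno / 8;
}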
Added: llvm-gcc-4.2/trunk/gcc/testsuite/g++.apple/visibility-4.C
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/g%2B%2B.apple/visibility-4.C?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/g++.apple/visibility-4.C (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/g++.apple/visibility-4.C Wed Jul 22 15:36:27 2009
@@ -0,0 +1,13 @@
+// APPLE LOCAL file 6983171 */
+/* { dg-require-visibility "" } */
+/* { dg-options "-fvisibility-ms-compat" } */
+
+int foo() {
+ try {
+ throw (int*)0;
+ } catch (...) {
+ return 0;
+ }
+
+ return 1;
+}
Added: llvm-gcc-4.2/trunk/gcc/testsuite/g++.dg/abi/mangle-neon.C
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/g%2B%2B.dg/abi/mangle-neon.C?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/g++.dg/abi/mangle-neon.C (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/g++.dg/abi/mangle-neon.C Wed Jul 22 15:36:27 2009
@@ -0,0 +1,48 @@
+// APPLE LOCAL file v7 merge
+// Test that ARM NEON vector types have their names mangled correctly.
+
+// { dg-do compile }
+// { dg-require-effective-target arm_neon_ok }
+// { dg-options "-mfpu=neon -mfloat-abi=softfp" }
+
+#include <arm_neon.h>
+
+void f0 (int8x8_t a) {}
+void f1 (int16x4_t a) {}
+void f2 (int32x2_t a) {}
+void f3 (uint8x8_t a) {}
+void f4 (uint16x4_t a) {}
+void f5 (uint32x2_t a) {}
+void f6 (float32x2_t a) {}
+void f7 (poly8x8_t a) {}
+void f8 (poly16x4_t a) {}
+
+void f9 (int8x16_t a) {}
+void f10 (int16x8_t a) {}
+void f11 (int32x4_t a) {}
+void f12 (uint8x16_t a) {}
+void f13 (uint16x8_t a) {}
+void f14 (uint32x4_t a) {}
+void f15 (float32x4_t a) {}
+void f16 (poly8x16_t a) {}
+void f17 (poly16x8_t a) {}
+
+// { dg-final { scan-assembler "_Z2f015__simd64_int8_t:" } }
+// { dg-final { scan-assembler "_Z2f116__simd64_int16_t:" } }
+// { dg-final { scan-assembler "_Z2f216__simd64_int32_t:" } }
+// { dg-final { scan-assembler "_Z2f316__simd64_uint8_t:" } }
+// { dg-final { scan-assembler "_Z2f417__simd64_uint16_t:" } }
+// { dg-final { scan-assembler "_Z2f517__simd64_uint32_t:" } }
+// { dg-final { scan-assembler "_Z2f618__simd64_float32_t:" } }
+// { dg-final { scan-assembler "_Z2f716__simd64_poly8_t:" } }
+// { dg-final { scan-assembler "_Z2f817__simd64_poly16_t:" } }
+// { dg-final { scan-assembler "_Z2f916__simd128_int8_t:" } }
+// { dg-final { scan-assembler "_Z3f1017__simd128_int16_t:" } }
+// { dg-final { scan-assembler "_Z3f1117__simd128_int32_t:" } }
+// { dg-final { scan-assembler "_Z3f1217__simd128_uint8_t:" } }
+// { dg-final { scan-assembler "_Z3f1318__simd128_uint16_t:" } }
+// { dg-final { scan-assembler "_Z3f1418__simd128_uint32_t:" } }
+// { dg-final { scan-assembler "_Z3f1519__simd128_float32_t:" } }
+// { dg-final { scan-assembler "_Z3f1617__simd128_poly8_t:" } }
+// { dg-final { scan-assembler "_Z3f1718__simd128_poly16_t:" } }
+
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.apple/6251664.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.apple/6251664.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.apple/6251664.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.apple/6251664.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,16 @@
+/* APPLE LOCAL file 6251664 */
+/* Verify that correct code is generated for a multiply-and-decrement
+ operation. */
+/* { dg-options "-O2" } */
+/* { dg-do run } */
+float f1 = 1.0, f2 = 2.0, f3 = 3.0, f4 = 4.0;
+
+void abort (void);
+
+int main (void)
+{
+ if (((f3 * f4) - (f1 * f2)) != 10.0)
+ abort();
+ return 0;
+}
+
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.apple/condexec-2.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.apple/condexec-2.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.apple/condexec-2.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.apple/condexec-2.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,30 @@
+/* APPLE LOCAL file 6280380 */
+/* The ARM backend was not recognizing that some operations cannot be
+ predicated, and was blindly generating un-predicated operations,
+ even though the branches were removed. */
+/* { dg-do run } */
+/* { dg-options "-Oz -marm" { target arm*-*-darwin* } } */
+
+extern void abort (void);
+
+int x = 1;
+float one = 1.0;
+
+float foobar (float a, float b, float c, float d)
+{
+ if (x)
+ return a + b;
+ else
+ return c - d;
+}
+
+int main (void)
+{
+ float result;
+
+ result = foobar (one, one, one, one);
+ if ((result) != 2.0f)
+ abort ();
+ return 0;
+}
+
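As a rough illustration of the failure mode described in the comment (my reconstruction in C, not the actual bad code generation), removing the branch without predicating the "else" arm lets it clobber the result unconditionally:

  float foobar_miscompiled (float a, float b, float c, float d)
  {
    float r = a + b;   /* "then" arm, should run only when x != 0      */
    r = c - d;         /* "else" arm emitted without its predicate,
                          so it always overwrites r                    */
    return r;          /* yields 0.0f instead of 2.0f when x is 1      */
  }
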
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/neon.exp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/neon.exp?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/neon.exp (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/neon.exp Wed Jul 22 15:36:27 2009
@@ -0,0 +1,36 @@
+# APPLE LOCAL file v7 merge
+# Copyright (C) 1997, 2004, 2006 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Exit immediately if this isn't an ARM target.
+if ![istarget arm*-*-*] then {
+ return
+}
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# Initialize `dg'.
+dg-init
+
+# Main loop.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
+ "" ""
+
+# All done.
+dg-finish
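These tests can be run on their own with the usual DejaGnu selector, e.g. make check-gcc RUNTESTFLAGS="neon.exp" from the gcc build directory (standard GCC testsuite usage, not something this commit documents).
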
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/polytypes.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/polytypes.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/polytypes.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/polytypes.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,48 @@
+/* APPLE LOCAL file v7 merge */
+/* Check that NEON polynomial vector types are suitably incompatible with
+ integer vector types of the same layout. */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-mfpu=neon -mfloat-abi=softfp -fno-lax-vector-conversions" } */
+
+#include <arm_neon.h>
+
+void s64_8 (int8x8_t a) {}
+void u64_8 (uint8x8_t a) {}
+void p64_8 (poly8x8_t a) {}
+void s64_16 (int16x4_t a) {}
+void u64_16 (uint16x4_t a) {}
+void p64_16 (poly16x4_t a) {}
+
+void s128_8 (int8x16_t a) {}
+void u128_8 (uint8x16_t a) {}
+void p128_8 (poly8x16_t a) {}
+void s128_16 (int16x8_t a) {}
+void u128_16 (uint16x8_t a) {}
+void p128_16 (poly16x8_t a) {}
+
+void foo ()
+{
+ poly8x8_t v64_8;
+ poly16x4_t v64_16;
+ poly8x16_t v128_8;
+ poly16x8_t v128_16;
+
+ s64_8 (v64_8); /* { dg-error "use -flax-vector-conversions.*incompatible type for argument 1 of 's64_8'" } */
+ u64_8 (v64_8); /* { dg-error "incompatible type for argument 1 of 'u64_8'" } */
+ p64_8 (v64_8);
+
+ s64_16 (v64_16); /* { dg-error "incompatible type for argument 1 of 's64_16'" } */
+ u64_16 (v64_16); /* { dg-error "incompatible type for argument 1 of 'u64_16'" } */
+ p64_16 (v64_16);
+
+ s128_8 (v128_8); /* { dg-error "incompatible type for argument 1 of 's128_8'" } */
+ u128_8 (v128_8); /* { dg-error "incompatible type for argument 1 of 'u128_8'" } */
+ p128_8 (v128_8);
+
+ s128_16 (v128_16); /* { dg-error "incompatible type for argument 1 of 's128_16'" } */
+ u128_16 (v128_16); /* { dg-error "incompatible type for argument 1 of 'u128_16'" } */
+ p128_16 (v128_16);
+}
+
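When a conversion between a polynomial and an integer vector really is intended, arm_neon.h provides explicit reinterpret intrinsics, so code can stay compatible with the strict checking this test enforces. A minimal sketch of that assumed usage (not part of the commit):

  #include <arm_neon.h>

  int8x8_t poly_to_signed (poly8x8_t p)
  {
    /* Bit-for-bit reinterpretation; no diagnostic even with
       -fno-lax-vector-conversions.  */
    return vreinterpret_s8_p8 (p);
  }
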
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRaddhns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRaddhns16 (void)
+{
+ int8x8_t out_int8x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int8x8_t = vraddhn_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vraddhn\.i16\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
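For readers unfamiliar with the intrinsic: vraddhn ("rounding add, narrow, high half") adds the two wide vectors, rounds, and keeps the upper half of each lane. A one-lane scalar model, based on my reading of the NEON VRADDHN semantics rather than anything in this commit:

  #include <stdint.h>

  static inline int8_t raddhn_lane_s16 (int16_t a, int16_t b)
  {
    int32_t sum = (int32_t) a + (int32_t) b;   /* widen to avoid overflow   */
    return (int8_t) ((sum + (1 << 7)) >> 8);   /* round, keep the top half  */
  }
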
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRaddhns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRaddhns32 (void)
+{
+ int16x4_t out_int16x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int16x4_t = vraddhn_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vraddhn\.i32\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRaddhns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRaddhns64 (void)
+{
+ int32x2_t out_int32x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int32x2_t = vraddhn_s64 (arg0_int64x2_t, arg1_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "vraddhn\.i64\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRaddhnu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRaddhnu16 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint8x8_t = vraddhn_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vraddhn\.i16\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRaddhnu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRaddhnu32 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint16x4_t = vraddhn_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vraddhn\.i32\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRaddhnu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRaddhnu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRaddhnu64 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+
+ out_uint32x2_t = vraddhn_u64 (arg0_uint64x2_t, arg1_uint64x2_t);
+}
+
+/* { dg-final { scan-assembler "vraddhn\.i64\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhaddQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhaddQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = vrhaddq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
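vrhadd is the rounding halving add: each lane becomes (a + b + 1) >> 1, computed without intermediate overflow. A one-lane scalar model (my summary of the VRHADD semantics, not from the commit):

  #include <stdint.h>

  static inline int16_t rhadd_lane_s16 (int16_t a, int16_t b)
  {
    return (int16_t) (((int32_t) a + (int32_t) b + 1) >> 1);
  }
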
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhaddQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhaddQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = vrhaddq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhaddQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhaddQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = vrhaddq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhaddQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhaddQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vrhaddq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhaddQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhaddQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vrhaddq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhaddQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhaddQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vrhaddq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhadds16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhadds16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = vrhadd_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhadds32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhadds32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = vrhadd_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhadds8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhadds8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhadds8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x8_t = vrhadd_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhaddu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhaddu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vrhadd_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhaddu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhaddu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vrhadd_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRhaddu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRhaddu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRhaddu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vrhadd_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vrhadd\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = vrshlq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
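vrshl shifts each lane of the first operand by a signed per-lane count taken from the second operand (the low byte of the corresponding lane); non-negative counts shift left, negative counts perform a rounding shift right. A one-lane model under those assumptions, mine rather than the commit's, and ignoring out-of-range counts:

  #include <stdint.h>

  static inline int16_t rshl_lane_s16 (int16_t a, int8_t count)
  {
    if (count >= 0)
      return (int16_t) (a << count);                    /* plain left shift */
    return (int16_t) (((int32_t) a
                       + (1 << (-count - 1))) >> -count); /* rounding right */
  }
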
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = vrshlq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlQs64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlQs64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int64x2_t = vrshlq_s64 (arg0_int64x2_t, arg1_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.s64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = vrshlq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_uint16x8_t = vrshlq_u16 (arg0_uint16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
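Note that even in the unsigned variants the shift-count vector stays signed (int16x8_t above); that is what lets a negative count request a rounding right shift.
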
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_uint32x4_t = vrshlq_u32 (arg0_uint32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlQu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlQu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_uint64x2_t = vrshlq_u64 (arg0_uint64x2_t, arg1_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.u64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_uint8x16_t = vrshlq_u8 (arg0_uint8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshls16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshls16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = vrshl_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshls32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshls32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = vrshl_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshls64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshls64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x1_t arg0_int64x1_t;
+ int64x1_t arg1_int64x1_t;
+
+ out_int64x1_t = vrshl_s64 (arg0_int64x1_t, arg1_int64x1_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.s64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshls8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshls8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshls8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x8_t = vrshl_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_uint16x4_t = vrshl_u16 (arg0_uint16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_uint32x2_t = vrshl_u32 (arg0_uint32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+ int64x1_t arg1_int64x1_t;
+
+ out_uint64x1_t = vrshl_u64 (arg0_uint64x1_t, arg1_int64x1_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.u64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshlu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshlu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshlu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_uint8x8_t = vrshl_u8 (arg0_uint8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vrshl\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrQ_ns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrQ_ns16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+
+ out_int16x8_t = vrshrq_n_s16 (arg0_int16x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
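vrshr_n is the immediate form of the rounding right shift: each lane becomes (a + (1 << (n - 1))) >> n for 1 <= n <= element width. One lane modelled in scalar C, per my reading of VRSHR rather than anything in the commit:

  #include <stdint.h>

  static inline int16_t rshr_n_lane_s16 (int16_t a, int n)
  {
    return (int16_t) (((int32_t) a + (1 << (n - 1))) >> n);
  }
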
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrQ_ns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrQ_ns32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_int32x4_t = vrshrq_n_s32 (arg0_int32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrQ_ns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrQ_ns64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+
+ out_int64x2_t = vrshrq_n_s64 (arg0_int64x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.s64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_ns8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrQ_ns8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrQ_ns8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+
+ out_int8x16_t = vrshrq_n_s8 (arg0_int8x16_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrQ_nu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrQ_nu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+
+ out_uint16x8_t = vrshrq_n_u16 (arg0_uint16x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrQ_nu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrQ_nu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+
+ out_uint32x4_t = vrshrq_n_u32 (arg0_uint32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrQ_nu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrQ_nu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+
+ out_uint64x2_t = vrshrq_n_u64 (arg0_uint64x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.u64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrQ_nu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrQ_nu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrQ_nu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+
+ out_uint8x16_t = vrshrq_n_u8 (arg0_uint8x16_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshr_ns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshr_ns16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+
+ out_int16x4_t = vrshr_n_s16 (arg0_int16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshr_ns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshr_ns32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+
+ out_int32x2_t = vrshr_n_s32 (arg0_int32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshr_ns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshr_ns64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x1_t arg0_int64x1_t;
+
+ out_int64x1_t = vrshr_n_s64 (arg0_int64x1_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.s64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_ns8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshr_ns8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshr_ns8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+
+ out_int8x8_t = vrshr_n_s8 (arg0_int8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshr_nu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshr_nu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+
+ out_uint16x4_t = vrshr_n_u16 (arg0_uint16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshr_nu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshr_nu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+
+ out_uint32x2_t = vrshr_n_u32 (arg0_uint32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshr_nu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshr_nu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+
+ out_uint64x1_t = vrshr_n_u64 (arg0_uint64x1_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.u64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshr_nu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshr_nu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshr_nu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+
+ out_uint8x8_t = vrshr_n_u8 (arg0_uint8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshr\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrn_ns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrn_ns16 (void)
+{
+ int8x8_t out_int8x8_t;
+ int16x8_t arg0_int16x8_t;
+
+ out_int8x8_t = vrshrn_n_s16 (arg0_int16x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshrn\.i16\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
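The vRshrn_n* variants additionally narrow: the source is a quad (Q) register of
the wider type and the result is a doubleword (D) register of the half-width
type, which is why these scan-assembler patterns expect a D destination and a Q
source.  A hypothetical scalar model of one s16 lane, assuming the usual
non-saturating NEON narrowing (the saturating form is vqrshrn):

  #include <stdint.h>

  /* Rounding shift right, then narrow one int16 lane to int8.  */
  static int8_t rshrn_s16_lane (int16_t x, int n)   /* 1 <= n <= 8 */
  {
    return (int8_t) ((x + (1 << (n - 1))) >> n);
  }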
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrn_ns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrn_ns32 (void)
+{
+ int16x4_t out_int16x4_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_int16x4_t = vrshrn_n_s32 (arg0_int32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshrn\.i32\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_ns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrn_ns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrn_ns64 (void)
+{
+ int32x2_t out_int32x2_t;
+ int64x2_t arg0_int64x2_t;
+
+ out_int32x2_t = vrshrn_n_s64 (arg0_int64x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshrn\.i64\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrn_nu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrn_nu16 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint16x8_t arg0_uint16x8_t;
+
+ out_uint8x8_t = vrshrn_n_u16 (arg0_uint16x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshrn\.i16\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrn_nu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrn_nu32 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint32x4_t arg0_uint32x4_t;
+
+ out_uint16x4_t = vrshrn_n_u32 (arg0_uint32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshrn\.i32\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRshrn_nu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRshrn_nu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRshrn_nu64 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint64x2_t arg0_uint64x2_t;
+
+ out_uint32x2_t = vrshrn_n_u64 (arg0_uint64x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrshrn\.i64\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsraQ_ns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsraQ_ns16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = vrsraq_n_s16 (arg0_int16x8_t, arg1_int16x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
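The vRsraQ_n*/vRsra_n* tests cover VRSRA, rounding shift right and accumulate:
the second operand is rounding-shifted and added into the first.  Roughly, per
lane (sketch only; helper name is hypothetical):

  #include <stdint.h>

  /* acc + rounding_shift_right (x, n) for one s16 lane,
     matching the operand order of vrsra_n_s16 (acc, x, n).  */
  static int16_t rsra_s16_lane (int16_t acc, int16_t x, int n)
  {
    return (int16_t) (acc + ((x + (1 << (n - 1))) >> n));
  }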
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsraQ_ns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsraQ_ns32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = vrsraq_n_s32 (arg0_int32x4_t, arg1_int32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsraQ_ns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsraQ_ns64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int64x2_t = vrsraq_n_s64 (arg0_int64x2_t, arg1_int64x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.s64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_ns8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsraQ_ns8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsraQ_ns8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = vrsraq_n_s8 (arg0_int8x16_t, arg1_int8x16_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsraQ_nu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsraQ_nu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vrsraq_n_u16 (arg0_uint16x8_t, arg1_uint16x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsraQ_nu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsraQ_nu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vrsraq_n_u32 (arg0_uint32x4_t, arg1_uint32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsraQ_nu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsraQ_nu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+
+ out_uint64x2_t = vrsraq_n_u64 (arg0_uint64x2_t, arg1_uint64x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.u64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsraQ_nu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsraQ_nu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsraQ_nu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vrsraq_n_u8 (arg0_uint8x16_t, arg1_uint8x16_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsra_ns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsra_ns16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = vrsra_n_s16 (arg0_int16x4_t, arg1_int16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsra_ns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsra_ns32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = vrsra_n_s32 (arg0_int32x2_t, arg1_int32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsra_ns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsra_ns64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x1_t arg0_int64x1_t;
+ int64x1_t arg1_int64x1_t;
+
+ out_int64x1_t = vrsra_n_s64 (arg0_int64x1_t, arg1_int64x1_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.s64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_ns8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsra_ns8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsra_ns8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x8_t = vrsra_n_s8 (arg0_int8x8_t, arg1_int8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsra_nu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsra_nu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vrsra_n_u16 (arg0_uint16x4_t, arg1_uint16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsra_nu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsra_nu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vrsra_n_u32 (arg0_uint32x2_t, arg1_uint32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsra_nu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsra_nu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+ uint64x1_t arg1_uint64x1_t;
+
+ out_uint64x1_t = vrsra_n_u64 (arg0_uint64x1_t, arg1_uint64x1_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.u64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsra_nu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsra_nu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsra_nu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vrsra_n_u8 (arg0_uint8x8_t, arg1_uint8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vrsra\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsubhns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsubhns16 (void)
+{
+ int8x8_t out_int8x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int8x8_t = vrsubhn_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vrsubhn\.i16\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
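vrsubhn_* computes a rounding subtract with halving narrow: the two wide vectors
are subtracted, the rounding constant is added, and the high half of each lane
is kept.  For the s16 case that is, per lane (an illustrative model under the
standard NEON definition, not part of the committed test):

  #include <stdint.h>

  /* High half of (a - b) with rounding, narrowing int16 lanes to int8.  */
  static int8_t rsubhn_s16_lane (int16_t a, int16_t b)
  {
    return (int8_t) (((int32_t) a - b + (1 << 7)) >> 8);
  }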
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsubhns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsubhns32 (void)
+{
+ int16x4_t out_int16x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int16x4_t = vrsubhn_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrsubhn\.i32\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsubhns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsubhns64 (void)
+{
+ int32x2_t out_int32x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int32x2_t = vrsubhn_s64 (arg0_int64x2_t, arg1_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "vrsubhn\.i64\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsubhnu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsubhnu16 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint8x8_t = vrsubhn_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vrsubhn\.i16\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsubhnu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsubhnu32 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint16x4_t = vrsubhn_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vrsubhn\.i32\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vRsubhnu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vRsubhnu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vRsubhnu64 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+
+ out_uint32x2_t = vrsubhn_u64 (arg0_uint64x2_t, arg1_uint64x2_t);
+}
+
+/* { dg-final { scan-assembler "vrsubhn\.i64\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabaQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabaQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+ int16x8_t arg2_int16x8_t;
+
+ out_int16x8_t = vabaq_s16 (arg0_int16x8_t, arg1_int16x8_t, arg2_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
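The vaba*/vabaQ* tests exercise VABA, absolute difference and accumulate: the
absolute difference of the last two operands is added to the first.  Per lane,
roughly (sketch only; helper name is hypothetical):

  #include <stdint.h>

  /* acc + |x - y| for one s16 lane, as in vaba_s16 (acc, x, y).  */
  static int16_t aba_s16_lane (int16_t acc, int16_t x, int16_t y)
  {
    int32_t d = (int32_t) x - y;
    return (int16_t) (acc + (d < 0 ? -d : d));
  }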
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabaQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabaQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+ int32x4_t arg2_int32x4_t;
+
+ out_int32x4_t = vabaq_s32 (arg0_int32x4_t, arg1_int32x4_t, arg2_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabaQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabaQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+ int8x16_t arg2_int8x16_t;
+
+ out_int8x16_t = vabaq_s8 (arg0_int8x16_t, arg1_int8x16_t, arg2_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabaQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabaQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+ uint16x8_t arg2_uint16x8_t;
+
+ out_uint16x8_t = vabaq_u16 (arg0_uint16x8_t, arg1_uint16x8_t, arg2_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabaQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabaQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+ uint32x4_t arg2_uint32x4_t;
+
+ out_uint32x4_t = vabaq_u32 (arg0_uint32x4_t, arg1_uint32x4_t, arg2_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabaQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabaQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabaQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+ uint8x16_t arg2_uint8x16_t;
+
+ out_uint8x16_t = vabaq_u8 (arg0_uint8x16_t, arg1_uint8x16_t, arg2_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabals16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabals16 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int16x4_t arg1_int16x4_t;
+ int16x4_t arg2_int16x4_t;
+
+ out_int32x4_t = vabal_s16 (arg0_int32x4_t, arg1_int16x4_t, arg2_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vabal\.s16\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
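vabal_* is the widening form: the absolute differences of the narrow operands
are widened before being accumulated into the wide first operand, which matches
the Q-destination/D-source pattern checked above.  One s16-to-s32 lane looks
roughly like this (illustrative only):

  #include <stdint.h>

  /* acc (int32) + |x - y| where x and y are int16 lanes.  */
  static int32_t abal_s16_lane (int32_t acc, int16_t x, int16_t y)
  {
    int32_t d = (int32_t) x - y;
    return acc + (d < 0 ? -d : d);
  }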
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabals32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabals32 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+ int32x2_t arg1_int32x2_t;
+ int32x2_t arg2_int32x2_t;
+
+ out_int64x2_t = vabal_s32 (arg0_int64x2_t, arg1_int32x2_t, arg2_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vabal\.s32\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabals8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabals8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabals8 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int8x8_t arg1_int8x8_t;
+ int8x8_t arg2_int8x8_t;
+
+ out_int16x8_t = vabal_s8 (arg0_int16x8_t, arg1_int8x8_t, arg2_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vabal\.s8\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabalu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabalu16 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint16x4_t arg1_uint16x4_t;
+ uint16x4_t arg2_uint16x4_t;
+
+ out_uint32x4_t = vabal_u16 (arg0_uint32x4_t, arg1_uint16x4_t, arg2_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vabal\.u16\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabalu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabalu32 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint32x2_t arg1_uint32x2_t;
+ uint32x2_t arg2_uint32x2_t;
+
+ out_uint64x2_t = vabal_u32 (arg0_uint64x2_t, arg1_uint32x2_t, arg2_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vabal\.u32\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabalu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabalu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabalu8 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint8x8_t arg1_uint8x8_t;
+ uint8x8_t arg2_uint8x8_t;
+
+ out_uint16x8_t = vabal_u8 (arg0_uint16x8_t, arg1_uint8x8_t, arg2_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vabal\.u8\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabas16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabas16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+ int16x4_t arg2_int16x4_t;
+
+ out_int16x4_t = vaba_s16 (arg0_int16x4_t, arg1_int16x4_t, arg2_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabas32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabas32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+ int32x2_t arg2_int32x2_t;
+
+ out_int32x2_t = vaba_s32 (arg0_int32x2_t, arg1_int32x2_t, arg2_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabas8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabas8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabas8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+ int8x8_t arg2_int8x8_t;
+
+ out_int8x8_t = vaba_s8 (arg0_int8x8_t, arg1_int8x8_t, arg2_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabau16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabau16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+ uint16x4_t arg2_uint16x4_t;
+
+ out_uint16x4_t = vaba_u16 (arg0_uint16x4_t, arg1_uint16x4_t, arg2_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabau32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabau32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+ uint32x2_t arg2_uint32x2_t;
+
+ out_uint32x2_t = vaba_u32 (arg0_uint32x2_t, arg1_uint32x2_t, arg2_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabau8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabau8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabau8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+ uint8x8_t arg2_uint8x8_t;
+
+ out_uint8x8_t = vaba_u8 (arg0_uint8x8_t, arg1_uint8x8_t, arg2_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vaba\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdQf32 (void)
+{
+ float32x4_t out_float32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_float32x4_t = vabdq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = vabdq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = vabdq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = vabdq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vabdq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vabdq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vabdq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdf32 (void)
+{
+ float32x2_t out_float32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_float32x2_t = vabd_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdls16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdls16 (void)
+{
+ int32x4_t out_int32x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int32x4_t = vabdl_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vabdl\.s16\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdls32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdls32 (void)
+{
+ int64x2_t out_int64x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int64x2_t = vabdl_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vabdl\.s32\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdls8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdls8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdls8 (void)
+{
+ int16x8_t out_int16x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int16x8_t = vabdl_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vabdl\.s8\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdlu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdlu16 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint32x4_t = vabdl_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vabdl\.u16\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdlu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdlu32 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint64x2_t = vabdl_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vabdl\.u32\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdlu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdlu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdlu8 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint16x8_t = vabdl_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vabdl\.u8\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
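
The vabdl* tests cover the long (widening) absolute-difference form, where a Q-register result is built from two D-register inputs. A minimal scalar sketch of one lane, assuming the usual VABDL semantics in which the destination lane is twice as wide so the difference can never wrap; scalar_vabdl_u8 is an illustrative name only.

  #include <stdint.h>
  #include <stdio.h>

  /* One lane of vabdl_u8: 8-bit absolute difference widened to 16 bits.  */
  static uint16_t scalar_vabdl_u8 (uint8_t a, uint8_t b)
  {
    return (uint16_t)((a > b) ? (a - b) : (b - a));
  }

  int main (void)
  {
    printf ("%u\n", (unsigned) scalar_vabdl_u8 (250, 3));   /* prints 247 */
    return 0;
  }
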
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabds16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabds16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = vabd_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabds32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabds32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = vabd_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabds8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabds8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabds8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x8_t = vabd_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vabd_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vabd_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabdu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabdu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabdu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vabd_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vabd\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
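
The plain vabd*/vabdQ* tests exercise the non-widening absolute-difference forms on D and Q registers. For reference, a scalar sketch of one unsigned lane (for unsigned inputs the difference always fits the element width, so overflow is not a concern); the helper name is invented here.

  #include <stdint.h>
  #include <stdio.h>

  /* One lane of vabd_u16: |a - b| computed in 16 bits.  */
  static uint16_t scalar_vabd_u16 (uint16_t a, uint16_t b)
  {
    return (a > b) ? (uint16_t)(a - b) : (uint16_t)(b - a);
  }

  int main (void)
  {
    printf ("%u\n", (unsigned) scalar_vabd_u16 (3, 40000));   /* prints 39997 */
    return 0;
  }
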
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabsQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabsQf32 (void)
+{
+ float32x4_t out_float32x4_t;
+ float32x4_t arg0_float32x4_t;
+
+ out_float32x4_t = vabsq_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vabs\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabsQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabsQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+
+ out_int16x8_t = vabsq_s16 (arg0_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vabs\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabsQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabsQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_int32x4_t = vabsq_s32 (arg0_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vabs\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabsQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabsQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+
+ out_int8x16_t = vabsq_s8 (arg0_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vabs\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabsf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabsf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabsf32 (void)
+{
+ float32x2_t out_float32x2_t;
+ float32x2_t arg0_float32x2_t;
+
+ out_float32x2_t = vabs_f32 (arg0_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vabs\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabss16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabss16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+
+ out_int16x4_t = vabs_s16 (arg0_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vabs\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabss32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabss32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+
+ out_int32x2_t = vabs_s32 (arg0_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vabs\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vabss8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vabss8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vabss8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+
+ out_int8x8_t = vabs_s8 (arg0_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vabs\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
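
The vabs*/vabsQ* tests cover the lane-wise absolute value. A scalar sketch of one s16 lane, assuming the non-saturating VABS behaviour (the most negative value wraps back to itself; the saturating variant is vqabs, which these files do not cover); scalar_vabs_s16 is an illustrative name.

  #include <stdint.h>
  #include <stdio.h>

  /* One lane of vabs_s16: absolute value; INT16_MIN has no positive
     counterpart, so the non-saturating instruction leaves it unchanged.  */
  static int16_t scalar_vabs_s16 (int16_t a)
  {
    return (a < 0) ? (int16_t)(-a) : a;   /* wraps for a == INT16_MIN */
  }

  int main (void)
  {
    /* Typically prints "7 -32768" on two's-complement targets.  */
    printf ("%d %d\n", scalar_vabs_s16 (-7), scalar_vabs_s16 (INT16_MIN));
    return 0;
  }
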
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddQf32 (void)
+{
+ float32x4_t out_float32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_float32x4_t = vaddq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = vaddq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = vaddq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddQs64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddQs64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int64x2_t = vaddq_s64 (arg0_int64x2_t, arg1_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = vaddq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vaddq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vaddq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddQu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddQu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+
+ out_uint64x2_t = vaddq_u64 (arg0_uint64x2_t, arg1_uint64x2_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vaddq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddf32 (void)
+{
+ float32x2_t out_float32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_float32x2_t = vadd_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
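
Note that the integer vadd/vaddQ tests in this patch expect the type-agnostic vadd.iN mnemonic (see the vadd\.i16 / vadd\.i32 patterns above), since signed and unsigned lane-wise addition produce the same bit pattern; only the float tests look for vadd.f32. A scalar sketch of one 16-bit integer lane, with an invented helper name.

  #include <stdint.h>
  #include <stdio.h>

  /* One lane of vadd_s16 / vadd_u16: a plain modular add; signed and
     unsigned results agree bit-for-bit, hence the single vadd.i16 form.  */
  static uint16_t scalar_vadd_16 (uint16_t a, uint16_t b)
  {
    return (uint16_t)(a + b);   /* modulo 2^16 */
  }

  int main (void)
  {
    printf ("%u\n", (unsigned) scalar_vadd_16 (0xFFFF, 2));   /* prints 1 (wraps) */
    return 0;
  }
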
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddhns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddhns16 (void)
+{
+ int8x8_t out_int8x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int8x8_t = vaddhn_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vaddhn\.i16\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddhns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddhns32 (void)
+{
+ int16x4_t out_int16x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int16x4_t = vaddhn_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vaddhn\.i32\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddhns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddhns64 (void)
+{
+ int32x2_t out_int32x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int32x2_t = vaddhn_s64 (arg0_int64x2_t, arg1_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "vaddhn\.i64\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddhnu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddhnu16 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint8x8_t = vaddhn_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vaddhn\.i16\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddhnu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddhnu32 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint16x4_t = vaddhn_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vaddhn\.i32\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddhnu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddhnu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddhnu64 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+
+ out_uint32x2_t = vaddhn_u64 (arg0_uint64x2_t, arg1_uint64x2_t);
+}
+
+/* { dg-final { scan-assembler "vaddhn\.i64\[ \]+\[dD\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
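
The vaddhn* tests cover the "add and take the high half, narrowing" form. A scalar sketch of one u16-to-u8 lane, assuming the usual VADDHN semantics (full-width wrapping add, then the high half of each lane is kept); the helper name is made up for this note.

  #include <stdint.h>
  #include <stdio.h>

  /* One lane of vaddhn_u16: add in 16 bits (modulo 2^16), keep the high 8.  */
  static uint8_t scalar_vaddhn_u16 (uint16_t a, uint16_t b)
  {
    uint16_t sum = (uint16_t)(a + b);
    return (uint8_t)(sum >> 8);
  }

  int main (void)
  {
    printf ("%u\n", (unsigned) scalar_vaddhn_u16 (0x1234, 0x0101));   /* prints 19 (0x13) */
    return 0;
  }
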
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddls16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddls16 (void)
+{
+ int32x4_t out_int32x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int32x4_t = vaddl_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vaddl\.s16\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddls32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddls32 (void)
+{
+ int64x2_t out_int64x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int64x2_t = vaddl_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vaddl\.s32\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddls8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddls8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddls8 (void)
+{
+ int16x8_t out_int16x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int16x8_t = vaddl_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vaddl\.s8\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddlu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddlu16 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint32x4_t = vaddl_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vaddl\.u16\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddlu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddlu32 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint64x2_t = vaddl_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vaddl\.u32\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddlu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddlu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddlu8 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint16x8_t = vaddl_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vaddl\.u8\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
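
The vaddl* tests cover the long (widening) add, where a Q-register result is formed from two D-register inputs. A scalar sketch of one u8 lane under that assumption; scalar_vaddl_u8 is an illustrative name only.

  #include <stdint.h>
  #include <stdio.h>

  /* One lane of vaddl_u8: both operands widened to 16 bits before adding,
     so the sum cannot wrap.  */
  static uint16_t scalar_vaddl_u8 (uint8_t a, uint8_t b)
  {
    return (uint16_t)((uint16_t)a + (uint16_t)b);
  }

  int main (void)
  {
    printf ("%u\n", (unsigned) scalar_vaddl_u8 (200, 100));   /* prints 300 */
    return 0;
  }
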
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vadds16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vadds16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = vadd_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vadds32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vadds32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = vadd_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vadds64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vadds64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x1_t arg0_int64x1_t;
+ int64x1_t arg1_int64x1_t;
+
+ out_int64x1_t = vadd_s64 (arg0_int64x1_t, arg1_int64x1_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vadds8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vadds8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vadds8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x8_t = vadd_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vadd_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vadd_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+ uint64x1_t arg1_uint64x1_t;
+
+ out_uint64x1_t = vadd_u64 (arg0_uint64x1_t, arg1_uint64x1_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vadd_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vadd\.i8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
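
The plain vadd tests exercise the non-widening form, where each lane keeps its width and unsigned arithmetic wraps modulo 2^n. A hypothetical standalone example for vadd_u8 (again outside the generated test harness, same toolchain assumptions):

#include <arm_neon.h>
#include <stdio.h>

int main (void)
{
  int i;
  uint8_t a_init[8] = { 250, 1, 2, 3, 4, 5, 6, 7 };
  uint8_t b_init[8] = {  10, 1, 1, 1, 1, 1, 1, 1 };
  uint8_t out[8];
  /* Element-wise add; each 8-bit lane wraps modulo 256, so lane 0 is
     (250 + 10) & 0xff == 4.  */
  vst1_u8 (out, vadd_u8 (vld1_u8 (a_init), vld1_u8 (b_init)));
  for (i = 0; i < 8; i++)
    printf ("%u ", out[i]);
  printf ("\n");
  return 0;
}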
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddws16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddws16 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int32x4_t = vaddw_s16 (arg0_int32x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vaddw\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddws32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddws32 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int64x2_t = vaddw_s32 (arg0_int64x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vaddw\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddws8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddws8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddws8 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int16x8_t = vaddw_s8 (arg0_int16x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vaddw\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddwu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddwu16 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint32x4_t = vaddw_u16 (arg0_uint32x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vaddw\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddwu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddwu32 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint64x2_t = vaddw_u32 (arg0_uint64x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vaddw\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vaddwu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vaddwu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vaddwu8 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint16x8_t = vaddw_u8 (arg0_uint16x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vaddw\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
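
The vaddw tests sit between the previous two groups: the first operand is already the wide type and only the second operand is widened, which is the usual shape for folding 8-bit data into a 16-bit accumulator. An illustrative sketch, not taken from the patch:

#include <arm_neon.h>
#include <stdio.h>

int main (void)
{
  int i;
  uint8_t bytes[8] = { 200, 201, 202, 203, 204, 205, 206, 207 };
  uint16_t out[8];
  /* vaddw widens the 8-bit operand to 16 bits before adding, so the
     16-bit accumulator absorbs the bytes without overflow.  */
  uint16x8_t acc = vdupq_n_u16 (1000);
  acc = vaddw_u8 (acc, vld1_u8 (bytes));
  vst1q_u16 (out, acc);
  for (i = 0; i < 8; i++)
    printf ("%u ", out[i]);   /* 1200 1201 ... 1207 */
  printf ("\n");
  return 0;
}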
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = vandq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = vandq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandQs64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandQs64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int64x2_t = vandq_s64 (arg0_int64x2_t, arg1_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = vandq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vandq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vandq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandQu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandQu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+
+ out_uint64x2_t = vandq_u64 (arg0_uint64x2_t, arg1_uint64x2_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vandq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vands16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vands16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = vand_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vands32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vands32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = vand_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vands64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vands64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x1_t arg0_int64x1_t;
+ int64x1_t arg1_int64x1_t;
+
+ out_int64x1_t = vand_s64 (arg0_int64x1_t, arg1_int64x1_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vands8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vands8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vands8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x8_t = vand_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vand_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vand_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+ uint64x1_t arg1_uint64x1_t;
+
+ out_uint64x1_t = vand_u64 (arg0_uint64x1_t, arg1_uint64x1_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vandu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vandu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vandu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vand_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vand\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
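
The vand group checks the bitwise AND forms on both D and Q registers. A small usage sketch under the same assumptions (illustrative only):

#include <arm_neon.h>
#include <stdio.h>

int main (void)
{
  int i;
  uint8_t data[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };
  uint8_t out[8];
  /* Keep only the low nibble of each byte.  */
  vst1_u8 (out, vand_u8 (vld1_u8 (data), vdup_n_u8 (0x0f)));
  for (i = 0; i < 8; i++)
    printf ("%02x ", out[i]);   /* 02 04 06 08 0a 0c 0e 00 */
  printf ("\n");
  return 0;
}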
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = vbicq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = vbicq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicQs64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicQs64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int64x2_t = vbicq_s64 (arg0_int64x2_t, arg1_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = vbicq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vbicq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vbicq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicQu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicQu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+
+ out_uint64x2_t = vbicq_u64 (arg0_uint64x2_t, arg1_uint64x2_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vbicq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbics16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbics16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = vbic_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbics32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbics32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = vbic_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbics64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbics64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x1_t arg0_int64x1_t;
+ int64x1_t arg1_int64x1_t;
+
+ out_int64x1_t = vbic_s64 (arg0_int64x1_t, arg1_int64x1_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbics8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbics8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbics8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x8_t = vbic_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vbic_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vbic_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+ uint64x1_t arg1_uint64x1_t;
+
+ out_uint64x1_t = vbic_u64 (arg0_uint64x1_t, arg1_uint64x1_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbicu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbicu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbicu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vbic_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vbic\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
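
vbic is the least obvious member of the bitwise group: it computes a & ~b, clearing in the first operand whatever bits are set in the second. A non-normative sketch of vbic_u8, not part of the generated tests:

#include <arm_neon.h>
#include <stdio.h>

int main (void)
{
  int i;
  uint8_t data[8] = { 0xff, 0x7f, 0x3f, 0x1f, 0x0f, 0x07, 0x03, 0x01 };
  uint8_t out[8];
  /* vbic yields data & ~mask: every bit set in the second operand is
     cleared in the first, here wiping the low nibble of each byte.  */
  vst1_u8 (out, vbic_u8 (vld1_u8 (data), vdup_n_u8 (0x0f)));
  for (i = 0; i < 8; i++)
    printf ("%02x ", out[i]);   /* f0 70 30 10 00 00 00 00 */
  printf ("\n");
  return 0;
}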
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQf32 (void)
+{
+ float32x4_t out_float32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ float32x4_t arg1_float32x4_t;
+ float32x4_t arg2_float32x4_t;
+
+ out_float32x4_t = vbslq_f32 (arg0_uint32x4_t, arg1_float32x4_t, arg2_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQp16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQp16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQp16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQp16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQp16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQp16 (void)
+{
+ poly16x8_t out_poly16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ poly16x8_t arg1_poly16x8_t;
+ poly16x8_t arg2_poly16x8_t;
+
+ out_poly16x8_t = vbslq_p16 (arg0_uint16x8_t, arg1_poly16x8_t, arg2_poly16x8_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQp8 (void)
+{
+ poly8x16_t out_poly8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ poly8x16_t arg1_poly8x16_t;
+ poly8x16_t arg2_poly8x16_t;
+
+ out_poly8x16_t = vbslq_p8 (arg0_uint8x16_t, arg1_poly8x16_t, arg2_poly8x16_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ int16x8_t arg1_int16x8_t;
+ int16x8_t arg2_int16x8_t;
+
+ out_int16x8_t = vbslq_s16 (arg0_uint16x8_t, arg1_int16x8_t, arg2_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ int32x4_t arg1_int32x4_t;
+ int32x4_t arg2_int32x4_t;
+
+ out_int32x4_t = vbslq_s32 (arg0_uint32x4_t, arg1_int32x4_t, arg2_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQs64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQs64 (void)
+{
+ int64x2_t out_int64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ int64x2_t arg1_int64x2_t;
+ int64x2_t arg2_int64x2_t;
+
+ out_int64x2_t = vbslq_s64 (arg0_uint64x2_t, arg1_int64x2_t, arg2_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ int8x16_t arg1_int8x16_t;
+ int8x16_t arg2_int8x16_t;
+
+ out_int8x16_t = vbslq_s8 (arg0_uint8x16_t, arg1_int8x16_t, arg2_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+ uint16x8_t arg2_uint16x8_t;
+
+ out_uint16x8_t = vbslq_u16 (arg0_uint16x8_t, arg1_uint16x8_t, arg2_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+ uint32x4_t arg2_uint32x4_t;
+
+ out_uint32x4_t = vbslq_u32 (arg0_uint32x4_t, arg1_uint32x4_t, arg2_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+ uint64x2_t arg2_uint64x2_t;
+
+ out_uint64x2_t = vbslq_u64 (arg0_uint64x2_t, arg1_uint64x2_t, arg2_uint64x2_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+ uint8x16_t arg2_uint8x16_t;
+
+ out_uint8x16_t = vbslq_u8 (arg0_uint8x16_t, arg1_uint8x16_t, arg2_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslf32 (void)
+{
+ float32x2_t out_float32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ float32x2_t arg1_float32x2_t;
+ float32x2_t arg2_float32x2_t;
+
+ out_float32x2_t = vbsl_f32 (arg0_uint32x2_t, arg1_float32x2_t, arg2_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslp16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslp16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslp16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslp16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslp16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslp16 (void)
+{
+ poly16x4_t out_poly16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ poly16x4_t arg1_poly16x4_t;
+ poly16x4_t arg2_poly16x4_t;
+
+ out_poly16x4_t = vbsl_p16 (arg0_uint16x4_t, arg1_poly16x4_t, arg2_poly16x4_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslp8 (void)
+{
+ poly8x8_t out_poly8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ poly8x8_t arg1_poly8x8_t;
+ poly8x8_t arg2_poly8x8_t;
+
+ out_poly8x8_t = vbsl_p8 (arg0_uint8x8_t, arg1_poly8x8_t, arg2_poly8x8_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbsls16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbsls16 (void)
+{
+ int16x4_t out_int16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ int16x4_t arg1_int16x4_t;
+ int16x4_t arg2_int16x4_t;
+
+ out_int16x4_t = vbsl_s16 (arg0_uint16x4_t, arg1_int16x4_t, arg2_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbsls32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbsls32 (void)
+{
+ int32x2_t out_int32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ int32x2_t arg1_int32x2_t;
+ int32x2_t arg2_int32x2_t;
+
+ out_int32x2_t = vbsl_s32 (arg0_uint32x2_t, arg1_int32x2_t, arg2_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbsls64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbsls64 (void)
+{
+ int64x1_t out_int64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+ int64x1_t arg1_int64x1_t;
+ int64x1_t arg2_int64x1_t;
+
+ out_int64x1_t = vbsl_s64 (arg0_uint64x1_t, arg1_int64x1_t, arg2_int64x1_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbsls8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbsls8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbsls8 (void)
+{
+ int8x8_t out_int8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ int8x8_t arg1_int8x8_t;
+ int8x8_t arg2_int8x8_t;
+
+ out_int8x8_t = vbsl_s8 (arg0_uint8x8_t, arg1_int8x8_t, arg2_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+ uint16x4_t arg2_uint16x4_t;
+
+ out_uint16x4_t = vbsl_u16 (arg0_uint16x4_t, arg1_uint16x4_t, arg2_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+ uint32x2_t arg2_uint32x2_t;
+
+ out_uint32x2_t = vbsl_u32 (arg0_uint32x2_t, arg1_uint32x2_t, arg2_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+ uint64x1_t arg1_uint64x1_t;
+ uint64x1_t arg2_uint64x1_t;
+
+ out_uint64x1_t = vbsl_u64 (arg0_uint64x1_t, arg1_uint64x1_t, arg2_uint64x1_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vbslu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,22 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vbslu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vbslu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+ uint8x8_t arg2_uint8x8_t;
+
+ out_uint8x8_t = vbsl_u8 (arg0_uint8x8_t, arg1_uint8x8_t, arg2_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "((vbsl)|(vbit)|(vbif))\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
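
[The vbsl/vbit/vbif patterns accepted by the tests above all encode the same
bitwise-select operation: each result bit comes from the second operand where
the mask bit is set and from the third operand where it is clear. A minimal
scalar sketch of that semantic, for reference only -- it is not part of the
generated tests and the helper name is made up:

  #include <stdint.h>
  #include <assert.h>

  /* Reference model of one byte of VBSL: bits of `t' where `mask' is 1,
     bits of `f' where it is 0.  */
  static uint8_t bsl_u8 (uint8_t mask, uint8_t t, uint8_t f)
  {
    return (uint8_t) ((mask & t) | (~mask & f));
  }

  int main (void)
  {
    /* 0xF0 selects the high nibble from 0xAB and the low nibble from 0xCD.  */
    assert (bsl_u8 (0xF0, 0xAB, 0xCD) == 0xAD);
    return 0;
  }
]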
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcageQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcageQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcageQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcageQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcageQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcageQf32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_uint32x4_t = vcageq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vacge\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcagef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcagef32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_uint32x2_t = vcage_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vacge\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagtQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagtQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagtQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagtQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcagtQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcagtQf32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_uint32x4_t = vcagtq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vacgt\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagtf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagtf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagtf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcagtf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcagtf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcagtf32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_uint32x2_t = vcagt_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vacgt\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaleQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaleQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaleQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaleQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcaleQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcaleQf32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_uint32x4_t = vcaleq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vacge\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcalef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcalef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcalef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcalef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcalef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcalef32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_uint32x2_t = vcale_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vacge\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaltQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaltQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaltQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaltQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcaltQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcaltQf32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_uint32x4_t = vcaltq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vacgt\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaltf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaltf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaltf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcaltf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcaltf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcaltf32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_uint32x2_t = vcalt_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vacgt\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
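
[Note that the vcale/vcalt tests above deliberately scan for vacge/vacgt:
the compiler emits vcale_f32 (a, b) as vacge with the operands swapped
(|b| >= |a|), and vcalt_f32 (a, b) as vacgt likewise, so the same instruction
patterns cover both directions of the absolute comparison. A scalar sketch of
the identity those patterns rely on, for illustration only and not part of
the commit:

  #include <math.h>
  #include <assert.h>

  /* What one lane of VACGE computes: |a| >= |b|.  */
  static int acge (float a, float b)
  {
    return fabsf (a) >= fabsf (b);
  }

  int main (void)
  {
    float a = -1.5f, b = 2.0f;
    /* vcale (a, b), i.e. |a| <= |b|, is the same predicate as acge (b, a).  */
    assert ((fabsf (a) <= fabsf (b)) == acge (b, a));
    return 0;
  }
]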
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqQf32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_uint32x4_t = vceqq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqQp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqQp8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ poly8x16_t arg0_poly8x16_t;
+ poly8x16_t arg1_poly8x16_t;
+
+ out_uint8x16_t = vceqq_p8 (arg0_poly8x16_t, arg1_poly8x16_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqQs16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_uint16x8_t = vceqq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqQs32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_uint32x4_t = vceqq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqQs8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_uint8x16_t = vceqq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vceqq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vceqq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vceqq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqf32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_uint32x2_t = vceq_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqp8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ poly8x8_t arg0_poly8x8_t;
+ poly8x8_t arg1_poly8x8_t;
+
+ out_uint8x8_t = vceq_p8 (arg0_poly8x8_t, arg1_poly8x8_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqs16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_uint16x4_t = vceq_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqs32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_uint32x2_t = vceq_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vceqs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vceqs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vceqs8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_uint8x8_t = vceq_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcequ16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcequ16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vceq_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcequ32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcequ32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vceq_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcequ8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcequ8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcequ8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vceq_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vceq\.i8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
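
[The vceq tests check lane-wise equality: each result lane is all ones when
the corresponding input lanes are equal and all zeros otherwise, which is why
the results are unsigned vectors even for signed, float and polynomial
inputs, and why the polynomial variants scan for vceq.i8 (polynomial lanes
compare as raw bits). A scalar model of one lane, for illustration only and
not part of the generated tests:

  #include <stdint.h>
  #include <assert.h>

  /* One lane of VCEQ.I8: all ones on equality, all zeros otherwise.  */
  static uint8_t ceq_u8 (uint8_t a, uint8_t b)
  {
    return (a == b) ? 0xFFu : 0x00u;
  }

  int main (void)
  {
    assert (ceq_u8 (0x5A, 0x5A) == 0xFF);
    assert (ceq_u8 (0x5A, 0x5B) == 0x00);
    return 0;
  }
]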
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeQf32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_uint32x4_t = vcgeq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeQs16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_uint16x8_t = vcgeq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeQs32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_uint32x4_t = vcgeq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeQs8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_uint8x16_t = vcgeq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vcgeq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vcgeq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vcgeq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgef32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_uint32x2_t = vcge_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcges16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcges16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_uint16x4_t = vcge_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcges32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcges32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_uint32x2_t = vcge_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcges8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcges8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcges8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_uint8x8_t = vcge_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vcge_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vcge_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgeu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgeu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgeu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vcge_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtQf32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_uint32x4_t = vcgtq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtQs16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_uint16x8_t = vcgtq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtQs32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_uint32x4_t = vcgtq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtQs8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_uint8x16_t = vcgtq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vcgtq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vcgtq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vcgtq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtf32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_uint32x2_t = vcgt_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgts16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgts16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_uint16x4_t = vcgt_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgts32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgts32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_uint32x2_t = vcgt_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgts8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgts8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgts8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_uint8x8_t = vcgt_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vcgt_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vcgt_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcgtu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcgtu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcgtu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vcgt_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleQf32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_uint32x4_t = vcleq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleQs16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_uint16x8_t = vcleq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleQs32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_uint32x4_t = vcleq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleQs8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_uint8x16_t = vcleq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vcleq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vcleq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vcleq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclef32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_uint32x2_t = vcle_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcles16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcles16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_uint16x4_t = vcle_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcles32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcles32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_uint32x2_t = vcle_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcles8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcles8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcles8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_uint8x8_t = vcle_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vcle_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vcle_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcleu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcleu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcleu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vcle_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcge\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
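[Editor's note, sketched for clarity: the vcle* tests above deliberately scan for `vcge' rather than `vcle'. NEON provides register-register VCGE but no register-register VCLE encoding, so a less-than-or-equal comparison is expected to assemble as VCGE with the operands swapped. A minimal illustration of the equivalence these patterns rely on; the helper name below is hypothetical and not part of the testsuite:

    #include "arm_neon.h"

    /* vcle_u8 (a, b) sets each lane to all-ones exactly when a <= b,
       which is the same result as vcge_u8 (b, a); the generated code is
       therefore a vcge.u8 instruction with the source operands reversed,
       which is what the scan-assembler lines above match.  */
    uint8x8_t le_via_ge (uint8x8_t a, uint8x8_t b)
    {
      return vcge_u8 (b, a);
    }
]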
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclsQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclsQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+
+ out_int16x8_t = vclsq_s16 (arg0_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vcls\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclsQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclsQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_int32x4_t = vclsq_s32 (arg0_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcls\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclsQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclsQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclsQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+
+ out_int8x16_t = vclsq_s8 (arg0_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcls\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclss16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclss16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+
+ out_int16x4_t = vcls_s16 (arg0_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vcls\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclss32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclss32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+
+ out_int32x2_t = vcls_s32 (arg0_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcls\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclss8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclss8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclss8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+
+ out_int8x8_t = vcls_s8 (arg0_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcls\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltQf32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_uint32x4_t = vcltq_f32 (arg0_float32x4_t, arg1_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltQs16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_uint16x8_t = vcltq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltQs32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_uint32x4_t = vcltq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltQs8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_uint8x16_t = vcltq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vcltq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vcltq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vcltq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltf32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_uint32x2_t = vclt_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclts16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclts16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_uint16x4_t = vclt_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclts32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclts32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_uint32x2_t = vclt_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclts8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclts8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclts8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_uint8x8_t = vclt_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.s8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vclt_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vclt_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcltu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcltu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcltu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vclt_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcgt\.u8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
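
The vclt* tests above all scan for a vcgt mnemonic rather than vclt: NEON has no "less-than" comparison encoding, so vclt is presumably synthesized by emitting vcgt with the operands swapped. A minimal sketch of that equivalence (not part of the patch; the helper name is made up for illustration), using only standard arm_neon.h intrinsics:

#include <arm_neon.h>

/* Sketch only: vclt_s16 (a, b) and vcgt_s16 (b, a) yield the same per-lane
   all-ones/all-zeros mask, which is why the tests accept a vcgt mnemonic.  */
uint16x4_t clt_via_cgt (int16x4_t a, int16x4_t b)
{
  uint16x4_t lt = vclt_s16 (a, b);   /* a < b                            */
  uint16x4_t gt = vcgt_s16 (b, a);   /* b > a, identical mask per lane   */
  return vand_u16 (lt, gt);          /* equal masks, so the AND is a no-op */
}
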
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+
+ out_int16x8_t = vclzq_s16 (arg0_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_int32x4_t = vclzq_s32 (arg0_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+
+ out_int8x16_t = vclzq_s8 (arg0_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+
+ out_uint16x8_t = vclzq_u16 (arg0_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+
+ out_uint32x4_t = vclzq_u32 (arg0_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+
+ out_uint8x16_t = vclzq_u8 (arg0_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzs16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+
+ out_int16x4_t = vclz_s16 (arg0_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzs32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+
+ out_int32x2_t = vclz_s32 (arg0_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzs8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+
+ out_int8x8_t = vclz_s8 (arg0_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+
+ out_uint16x4_t = vclz_u16 (arg0_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+
+ out_uint32x2_t = vclz_u32 (arg0_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vclzu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vclzu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vclzu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+
+ out_uint8x8_t = vclz_u8 (arg0_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vclz\.i8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
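
Note that the signed and unsigned vclz* tests scan for the same vclz.iNN mnemonic: counting leading zeros does not depend on signedness, so the instruction carries only an element-size suffix. A small self-checking sketch of the assumed per-lane count-leading-zeros semantics (not part of the patch):

#include <arm_neon.h>
#include <assert.h>

void check_vclz_u8 (void)
{
  uint8x8_t in  = vdup_n_u8 (0x10);        /* 0b00010000 has 3 leading zeros */
  uint8x8_t out = vclz_u8 (in);
  assert (vget_lane_u8 (out, 0) == 3);
}
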
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcntQp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcntQp8 (void)
+{
+ poly8x16_t out_poly8x16_t;
+ poly8x16_t arg0_poly8x16_t;
+
+ out_poly8x16_t = vcntq_p8 (arg0_poly8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcnt\.8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcntQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcntQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+
+ out_int8x16_t = vcntq_s8 (arg0_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcnt\.8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcntQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcntQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+
+ out_uint8x16_t = vcntq_u8 (arg0_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vcnt\.8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcntp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcntp8 (void)
+{
+ poly8x8_t out_poly8x8_t;
+ poly8x8_t arg0_poly8x8_t;
+
+ out_poly8x8_t = vcnt_p8 (arg0_poly8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcnt\.8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcnts8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcnts8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcnts8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcnts8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcnts8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcnts8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+
+ out_int8x8_t = vcnt_s8 (arg0_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcnt\.8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcntu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcntu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcntu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+
+ out_uint8x8_t = vcnt_u8 (arg0_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "vcnt\.8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
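
Only 8-bit element variants (p8/s8/u8, in both D and Q forms) are tested for vcnt, since NEON defines the per-lane population count only for 8-bit lanes; wider popcounts are normally built from vcnt plus pairwise additions. A hedged sketch of that common idiom (not part of the patch):

#include <arm_neon.h>

/* Assumed idiom: a 16-bit per-lane popcount built from the 8-bit vcnt.  */
uint16x4_t popcount_u16 (uint16x4_t v)
{
  uint8x8_t bytes = vcnt_u8 (vreinterpret_u8_u16 (v));  /* popcount per byte */
  return vpaddl_u8 (bytes);   /* pairwise-add adjacent bytes into 16-bit lanes */
}
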
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombinef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombinef32 (void)
+{
+ float32x4_t out_float32x4_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_float32x4_t = vcombine_f32 (arg0_float32x2_t, arg1_float32x2_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinep16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinep16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinep16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinep16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombinep16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombinep16 (void)
+{
+ poly16x8_t out_poly16x8_t;
+ poly16x4_t arg0_poly16x4_t;
+ poly16x4_t arg1_poly16x4_t;
+
+ out_poly16x8_t = vcombine_p16 (arg0_poly16x4_t, arg1_poly16x4_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinep8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinep8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinep8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombinep8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombinep8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombinep8 (void)
+{
+ poly8x16_t out_poly8x16_t;
+ poly8x8_t arg0_poly8x8_t;
+ poly8x8_t arg1_poly8x8_t;
+
+ out_poly8x16_t = vcombine_p8 (arg0_poly8x8_t, arg1_poly8x8_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombines16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombines16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x8_t = vcombine_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombines32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombines32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x4_t = vcombine_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombines64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombines64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x1_t arg0_int64x1_t;
+ int64x1_t arg1_int64x1_t;
+
+ out_int64x2_t = vcombine_s64 (arg0_int64x1_t, arg1_int64x1_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombines8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombines8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombines8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x16_t = vcombine_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombineu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombineu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x8_t = vcombine_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombineu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombineu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x4_t = vcombine_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombineu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombineu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x1_t arg0_uint64x1_t;
+ uint64x1_t arg1_uint64x1_t;
+
+ out_uint64x2_t = vcombine_u64 (arg0_uint64x1_t, arg1_uint64x1_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcombineu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcombineu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcombineu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x16_t = vcombine_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreatef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreatef32 (void)
+{
+ float32x2_t out_float32x2_t;
+ uint64_t arg0_uint64_t;
+
+ out_float32x2_t = vcreate_f32 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatep16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatep16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatep16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatep16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreatep16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreatep16 (void)
+{
+ poly16x4_t out_poly16x4_t;
+ uint64_t arg0_uint64_t;
+
+ out_poly16x4_t = vcreate_p16 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatep8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatep8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatep8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreatep8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreatep8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreatep8 (void)
+{
+ poly8x8_t out_poly8x8_t;
+ uint64_t arg0_uint64_t;
+
+ out_poly8x8_t = vcreate_p8 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreates16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreates16 (void)
+{
+ int16x4_t out_int16x4_t;
+ uint64_t arg0_uint64_t;
+
+ out_int16x4_t = vcreate_s16 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreates32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreates32 (void)
+{
+ int32x2_t out_int32x2_t;
+ uint64_t arg0_uint64_t;
+
+ out_int32x2_t = vcreate_s32 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreates64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreates64 (void)
+{
+ int64x1_t out_int64x1_t;
+ uint64_t arg0_uint64_t;
+
+ out_int64x1_t = vcreate_s64 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreates8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreates8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreates8 (void)
+{
+ int8x8_t out_int8x8_t;
+ uint64_t arg0_uint64_t;
+
+ out_int8x8_t = vcreate_s8 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreateu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreateu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint64_t arg0_uint64_t;
+
+ out_uint16x4_t = vcreate_u16 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreateu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreateu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint64_t arg0_uint64_t;
+
+ out_uint32x2_t = vcreate_u32 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreateu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreateu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64_t arg0_uint64_t;
+
+ out_uint64x1_t = vcreate_u64 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcreateu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcreateu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcreateu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint64_t arg0_uint64_t;
+
+ out_uint8x8_t = vcreate_u8 (arg0_uint64_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
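
Unlike the comparison and conversion tests, the vcombine* and vcreate* tests above carry no scan-assembler pattern: joining two D registers into a Q register, or reinterpreting a 64-bit scalar as a D register, generally produces no distinctive NEON arithmetic instruction (often just register moves), so only successful assembly is checked. A short usage sketch of the two intrinsics together (not part of the patch; names and values chosen arbitrarily):

#include <arm_neon.h>

uint8x16_t make_vector (uint64_t lo_bits, uint64_t hi_bits)
{
  uint8x8_t lo = vcreate_u8 (lo_bits);   /* reinterpret 64 bits as 8 bytes    */
  uint8x8_t hi = vcreate_u8 (hi_bits);
  return vcombine_u8 (lo, hi);           /* low half from lo, high half from hi */
}
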
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nf32_s32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nf32_s32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nf32_s32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nf32_s32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtQ_nf32_s32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtQ_nf32_s32 (void)
+{
+ float32x4_t out_float32x4_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_float32x4_t = vcvtq_n_f32_s32 (arg0_int32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vcvt\.f32.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nf32_u32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nf32_u32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nf32_u32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nf32_u32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtQ_nf32_u32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtQ_nf32_u32 (void)
+{
+ float32x4_t out_float32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+
+ out_float32x4_t = vcvtq_n_f32_u32 (arg0_uint32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vcvt\.f32.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_ns32_f32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_ns32_f32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_ns32_f32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_ns32_f32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtQ_ns32_f32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtQ_ns32_f32 (void)
+{
+ int32x4_t out_int32x4_t;
+ float32x4_t arg0_float32x4_t;
+
+ out_int32x4_t = vcvtq_n_s32_f32 (arg0_float32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vcvt\.s32.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nu32_f32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nu32_f32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nu32_f32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQ_nu32_f32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtQ_nu32_f32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtQ_nu32_f32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+
+ out_uint32x4_t = vcvtq_n_u32_f32 (arg0_float32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vcvt\.u32.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQf32_s32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQf32_s32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQf32_s32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQf32_s32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtQf32_s32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtQf32_s32 (void)
+{
+ float32x4_t out_float32x4_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_float32x4_t = vcvtq_f32_s32 (arg0_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcvt\.f32.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQf32_u32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQf32_u32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQf32_u32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQf32_u32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtQf32_u32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtQf32_u32 (void)
+{
+ float32x4_t out_float32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+
+ out_float32x4_t = vcvtq_f32_u32 (arg0_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcvt\.f32.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQs32_f32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQs32_f32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQs32_f32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQs32_f32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtQs32_f32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtQs32_f32 (void)
+{
+ int32x4_t out_int32x4_t;
+ float32x4_t arg0_float32x4_t;
+
+ out_int32x4_t = vcvtq_s32_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcvt\.s32.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQu32_f32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQu32_f32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQu32_f32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtQu32_f32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtQu32_f32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtQu32_f32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ float32x4_t arg0_float32x4_t;
+
+ out_uint32x4_t = vcvtq_u32_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vcvt\.u32.f32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nf32_s32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nf32_s32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nf32_s32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nf32_s32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvt_nf32_s32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvt_nf32_s32 (void)
+{
+ float32x2_t out_float32x2_t;
+ int32x2_t arg0_int32x2_t;
+
+ out_float32x2_t = vcvt_n_f32_s32 (arg0_int32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vcvt\.f32.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nf32_u32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nf32_u32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nf32_u32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nf32_u32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvt_nf32_u32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvt_nf32_u32 (void)
+{
+ float32x2_t out_float32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+
+ out_float32x2_t = vcvt_n_f32_u32 (arg0_uint32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vcvt\.f32.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_ns32_f32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_ns32_f32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_ns32_f32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_ns32_f32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvt_ns32_f32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvt_ns32_f32 (void)
+{
+ int32x2_t out_int32x2_t;
+ float32x2_t arg0_float32x2_t;
+
+ out_int32x2_t = vcvt_n_s32_f32 (arg0_float32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vcvt\.s32.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nu32_f32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nu32_f32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nu32_f32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvt_nu32_f32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvt_nu32_f32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvt_nu32_f32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+
+ out_uint32x2_t = vcvt_n_u32_f32 (arg0_float32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vcvt\.u32.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtf32_s32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtf32_s32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtf32_s32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtf32_s32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtf32_s32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtf32_s32 (void)
+{
+ float32x2_t out_float32x2_t;
+ int32x2_t arg0_int32x2_t;
+
+ out_float32x2_t = vcvt_f32_s32 (arg0_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcvt\.f32.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtf32_u32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtf32_u32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtf32_u32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtf32_u32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtf32_u32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtf32_u32 (void)
+{
+ float32x2_t out_float32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+
+ out_float32x2_t = vcvt_f32_u32 (arg0_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcvt\.f32.u32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvts32_f32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvts32_f32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvts32_f32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvts32_f32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvts32_f32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvts32_f32 (void)
+{
+ int32x2_t out_int32x2_t;
+ float32x2_t arg0_float32x2_t;
+
+ out_int32x2_t = vcvt_s32_f32 (arg0_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcvt\.s32.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtu32_f32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtu32_f32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtu32_f32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vcvtu32_f32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vcvtu32_f32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vcvtu32_f32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ float32x2_t arg0_float32x2_t;
+
+ out_uint32x2_t = vcvt_u32_f32 (arg0_float32x2_t);
+}
+
+/* { dg-final { scan-assembler "vcvt\.u32.f32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_lanef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_lanef32 (void)
+{
+ float32x4_t out_float32x4_t;
+ float32x2_t arg0_float32x2_t;
+
+ out_float32x4_t = vdupq_lane_f32 (arg0_float32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanep16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanep16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanep16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanep16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_lanep16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_lanep16 (void)
+{
+ poly16x8_t out_poly16x8_t;
+ poly16x4_t arg0_poly16x4_t;
+
+ out_poly16x8_t = vdupq_lane_p16 (arg0_poly16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanep8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanep8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanep8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanep8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_lanep8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_lanep8 (void)
+{
+ poly8x16_t out_poly8x16_t;
+ poly8x8_t arg0_poly8x8_t;
+
+ out_poly8x16_t = vdupq_lane_p8 (arg0_poly8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_lanes16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_lanes16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x4_t arg0_int16x4_t;
+
+ out_int16x8_t = vdupq_lane_s16 (arg0_int16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_lanes32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_lanes32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x2_t arg0_int32x2_t;
+
+ out_int32x4_t = vdupq_lane_s32 (arg0_int32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_lanes64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_lanes64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x1_t arg0_int64x1_t;
+
+ out_int64x2_t = vdupq_lane_s64 (arg0_int64x1_t, 0);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_lanes8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_lanes8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_lanes8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x8_t arg0_int8x8_t;
+
+ out_int8x16_t = vdupq_lane_s8 (arg0_int8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_laneu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_laneu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x4_t arg0_uint16x4_t;
+
+ out_uint16x8_t = vdupq_lane_u16 (arg0_uint16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_laneu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_laneu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x2_t arg0_uint32x2_t;
+
+ out_uint32x4_t = vdupq_lane_u32 (arg0_uint32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_laneu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_laneu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x1_t arg0_uint64x1_t;
+
+ out_uint64x2_t = vdupq_lane_u64 (arg0_uint64x1_t, 0);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_laneu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_laneu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_laneu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x8_t arg0_uint8x8_t;
+
+ out_uint8x16_t = vdupq_lane_u8 (arg0_uint8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[qQ\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_nf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_nf32 (void)
+{
+ float32x4_t out_float32x4_t;
+ float32_t arg0_float32_t;
+
+ out_float32x4_t = vdupq_n_f32 (arg0_float32_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[qQ\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_np16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_np16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_np16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_np16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_np16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_np16 (void)
+{
+ poly16x8_t out_poly16x8_t;
+ poly16_t arg0_poly16_t;
+
+ out_poly16x8_t = vdupq_n_p16 (arg0_poly16_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[qQ\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_np8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_np8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_np8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_np8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_np8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_np8 (void)
+{
+ poly8x16_t out_poly8x16_t;
+ poly8_t arg0_poly8_t;
+
+ out_poly8x16_t = vdupq_n_p8 (arg0_poly8_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[qQ\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_ns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_ns16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16_t arg0_int16_t;
+
+ out_int16x8_t = vdupq_n_s16 (arg0_int16_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[qQ\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_ns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_ns32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32_t arg0_int32_t;
+
+ out_int32x4_t = vdupq_n_s32 (arg0_int32_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[qQ\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_ns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_ns64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64_t arg0_int64_t;
+
+ out_int64x2_t = vdupq_n_s64 (arg0_int64_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_ns8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_ns8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_ns8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8_t arg0_int8_t;
+
+ out_int8x16_t = vdupq_n_s8 (arg0_int8_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[qQ\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_nu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_nu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16_t arg0_uint16_t;
+
+ out_uint16x8_t = vdupq_n_u16 (arg0_uint16_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[qQ\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_nu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_nu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32_t arg0_uint32_t;
+
+ out_uint32x4_t = vdupq_n_u32 (arg0_uint32_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[qQ\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_nu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_nu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64_t arg0_uint64_t;
+
+ out_uint64x2_t = vdupq_n_u64 (arg0_uint64_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdupQ_nu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdupQ_nu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdupQ_nu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8_t arg0_uint8_t;
+
+ out_uint8x16_t = vdupq_n_u8 (arg0_uint8_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[qQ\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_lanef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_lanef32 (void)
+{
+ float32x2_t out_float32x2_t;
+ float32x2_t arg0_float32x2_t;
+
+ out_float32x2_t = vdup_lane_f32 (arg0_float32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanep16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanep16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanep16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanep16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_lanep16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_lanep16 (void)
+{
+ poly16x4_t out_poly16x4_t;
+ poly16x4_t arg0_poly16x4_t;
+
+ out_poly16x4_t = vdup_lane_p16 (arg0_poly16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanep8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanep8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanep8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanep8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_lanep8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_lanep8 (void)
+{
+ poly8x8_t out_poly8x8_t;
+ poly8x8_t arg0_poly8x8_t;
+
+ out_poly8x8_t = vdup_lane_p8 (arg0_poly8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_lanes16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_lanes16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+
+ out_int16x4_t = vdup_lane_s16 (arg0_int16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_lanes32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_lanes32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+
+ out_int32x2_t = vdup_lane_s32 (arg0_int32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_lanes64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_lanes64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x1_t arg0_int64x1_t;
+
+ out_int64x1_t = vdup_lane_s64 (arg0_int64x1_t, 0);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_lanes8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_lanes8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_lanes8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+
+ out_int8x8_t = vdup_lane_s8 (arg0_int8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_laneu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_laneu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+
+ out_uint16x4_t = vdup_lane_u16 (arg0_uint16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_laneu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_laneu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+
+ out_uint32x2_t = vdup_lane_u32 (arg0_uint32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_laneu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_laneu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+
+ out_uint64x1_t = vdup_lane_u64 (arg0_uint64x1_t, 0);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_laneu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_laneu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_laneu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+
+ out_uint8x8_t = vdup_lane_u8 (arg0_uint8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_nf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_nf32 (void)
+{
+ float32x2_t out_float32x2_t;
+ float32_t arg0_float32_t;
+
+ out_float32x2_t = vdup_n_f32 (arg0_float32_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_np16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_np16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_np16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_np16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_np16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_np16 (void)
+{
+ poly16x4_t out_poly16x4_t;
+ poly16_t arg0_poly16_t;
+
+ out_poly16x4_t = vdup_n_p16 (arg0_poly16_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_np8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_np8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_np8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_np8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_np8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_np8 (void)
+{
+ poly8x8_t out_poly8x8_t;
+ poly8_t arg0_poly8_t;
+
+ out_poly8x8_t = vdup_n_p8 (arg0_poly8_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_ns16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_ns16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16_t arg0_int16_t;
+
+ out_int16x4_t = vdup_n_s16 (arg0_int16_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_ns32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_ns32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32_t arg0_int32_t;
+
+ out_int32x2_t = vdup_n_s32 (arg0_int32_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_ns64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_ns64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64_t arg0_int64_t;
+
+ out_int64x1_t = vdup_n_s64 (arg0_int64_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_ns8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_ns8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_ns8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8_t arg0_int8_t;
+
+ out_int8x8_t = vdup_n_s8 (arg0_int8_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_nu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_nu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16_t arg0_uint16_t;
+
+ out_uint16x4_t = vdup_n_u16 (arg0_uint16_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.16\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_nu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_nu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32_t arg0_uint32_t;
+
+ out_uint32x2_t = vdup_n_u32 (arg0_uint32_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.32\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_nu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_nu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64_t arg0_uint64_t;
+
+ out_uint64x1_t = vdup_n_u64 (arg0_uint64_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vdup_nu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vdup_nu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vdup_nu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8_t arg0_uint8_t;
+
+ out_uint8x8_t = vdup_n_u8 (arg0_uint8_t);
+}
+
+/* { dg-final { scan-assembler "vdup\.8\[ \]+\[dD\]\[0-9\]+, \[rR\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veorQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veorQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = veorq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veorQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veorQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = veorq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veorQs64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veorQs64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int64x2_t = veorq_s64 (arg0_int64x2_t, arg1_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veorQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veorQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = veorq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veorQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veorQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = veorq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veorQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veorQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = veorq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veorQu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veorQu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+
+ out_uint64x2_t = veorq_u64 (arg0_uint64x2_t, arg1_uint64x2_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veorQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veorQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veorQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = veorq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veors16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veors16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = veor_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veors32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veors32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = veor_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veors64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veors64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x1_t arg0_int64x1_t;
+ int64x1_t arg1_int64x1_t;
+
+ out_int64x1_t = veor_s64 (arg0_int64x1_t, arg1_int64x1_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veors8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veors8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veors8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x8_t = veor_s8 (arg0_int8x8_t, arg1_int8x8_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veoru16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veoru16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = veor_u16 (arg0_uint16x4_t, arg1_uint16x4_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veoru32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veoru32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = veor_u32 (arg0_uint32x2_t, arg1_uint32x2_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veoru64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veoru64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+ uint64x1_t arg1_uint64x1_t;
+
+ out_uint64x1_t = veor_u64 (arg0_uint64x1_t, arg1_uint64x1_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/veoru8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `veoru8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_veoru8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = veor_u8 (arg0_uint8x8_t, arg1_uint8x8_t);
+}
+
+/* { dg-final { scan-assembler "veor\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
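
For reference, the veor* tests above all exercise the same bitwise exclusive-OR intrinsic family; only the element type and vector width vary. A minimal illustrative use, a sketch assuming the same -mfpu=neon -mfloat-abi=softfp setup as the tests (function names here are hypothetical, not taken from the patch):

#include <arm_neon.h>

/* Illustrative sketch only: D-register (64-bit) variant, expected to
   assemble to a single "veor dN, dN, dN". */
uint8x8_t xor_mask (uint8x8_t data, uint8x8_t mask)
{
  return veor_u8 (data, mask);
}

/* Q-register (128-bit) variant, expected to assemble to
   "veor qN, qN, qN", matching the scan-assembler patterns above. */
uint8x16_t xor_mask_q (uint8x16_t data, uint8x16_t mask)
{
  return veorq_u8 (data, mask);
}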
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQf32 (void)
+{
+ float32x4_t out_float32x4_t;
+ float32x4_t arg0_float32x4_t;
+ float32x4_t arg1_float32x4_t;
+
+ out_float32x4_t = vextq_f32 (arg0_float32x4_t, arg1_float32x4_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQp16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQp16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQp16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQp16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQp16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQp16 (void)
+{
+ poly16x8_t out_poly16x8_t;
+ poly16x8_t arg0_poly16x8_t;
+ poly16x8_t arg1_poly16x8_t;
+
+ out_poly16x8_t = vextq_p16 (arg0_poly16x8_t, arg1_poly16x8_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQp8 (void)
+{
+ poly8x16_t out_poly8x16_t;
+ poly8x16_t arg0_poly8x16_t;
+ poly8x16_t arg1_poly8x16_t;
+
+ out_poly8x16_t = vextq_p8 (arg0_poly8x16_t, arg1_poly8x16_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = vextq_s16 (arg0_int16x8_t, arg1_int16x8_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = vextq_s32 (arg0_int32x4_t, arg1_int32x4_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQs64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQs64 (void)
+{
+ int64x2_t out_int64x2_t;
+ int64x2_t arg0_int64x2_t;
+ int64x2_t arg1_int64x2_t;
+
+ out_int64x2_t = vextq_s64 (arg0_int64x2_t, arg1_int64x2_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = vextq_s8 (arg0_int8x16_t, arg1_int8x16_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vextq_u16 (arg0_uint16x8_t, arg1_uint16x8_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vextq_u32 (arg0_uint32x4_t, arg1_uint32x4_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQu64 (void)
+{
+ uint64x2_t out_uint64x2_t;
+ uint64x2_t arg0_uint64x2_t;
+ uint64x2_t arg1_uint64x2_t;
+
+ out_uint64x2_t = vextq_u64 (arg0_uint64x2_t, arg1_uint64x2_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.64\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vextq_u8 (arg0_uint8x16_t, arg1_uint8x16_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextf32 (void)
+{
+ float32x2_t out_float32x2_t;
+ float32x2_t arg0_float32x2_t;
+ float32x2_t arg1_float32x2_t;
+
+ out_float32x2_t = vext_f32 (arg0_float32x2_t, arg1_float32x2_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextp16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextp16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextp16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextp16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextp16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextp16 (void)
+{
+ poly16x4_t out_poly16x4_t;
+ poly16x4_t arg0_poly16x4_t;
+ poly16x4_t arg1_poly16x4_t;
+
+ out_poly16x4_t = vext_p16 (arg0_poly16x4_t, arg1_poly16x4_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextp8 (void)
+{
+ poly8x8_t out_poly8x8_t;
+ poly8x8_t arg0_poly8x8_t;
+ poly8x8_t arg1_poly8x8_t;
+
+ out_poly8x8_t = vext_p8 (arg0_poly8x8_t, arg1_poly8x8_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vexts16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vexts16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = vext_s16 (arg0_int16x4_t, arg1_int16x4_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vexts32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vexts32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = vext_s32 (arg0_int32x2_t, arg1_int32x2_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vexts64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vexts64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x1_t arg0_int64x1_t;
+ int64x1_t arg1_int64x1_t;
+
+ out_int64x1_t = vext_s64 (arg0_int64x1_t, arg1_int64x1_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vexts8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vexts8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vexts8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x8_t arg0_int8x8_t;
+ int8x8_t arg1_int8x8_t;
+
+ out_int8x8_t = vext_s8 (arg0_int8x8_t, arg1_int8x8_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x4_t arg0_uint16x4_t;
+ uint16x4_t arg1_uint16x4_t;
+
+ out_uint16x4_t = vext_u16 (arg0_uint16x4_t, arg1_uint16x4_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x2_t arg0_uint32x2_t;
+ uint32x2_t arg1_uint32x2_t;
+
+ out_uint32x2_t = vext_u32 (arg0_uint32x2_t, arg1_uint32x2_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x1_t arg0_uint64x1_t;
+ uint64x1_t arg1_uint64x1_t;
+
+ out_uint64x1_t = vext_u64 (arg0_uint64x1_t, arg1_uint64x1_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.64\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vextu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vextu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vextu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x8_t arg0_uint8x8_t;
+ uint8x8_t arg1_uint8x8_t;
+
+ out_uint8x8_t = vext_u8 (arg0_uint8x8_t, arg1_uint8x8_t, 0);
+}
+
+/* { dg-final { scan-assembler "vext\.8\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+, #\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
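
The vext* tests above check the vector-extract intrinsics: vext conceptually concatenates its two operands and returns a window of lanes starting at a constant offset, which is why every scan-assembler pattern includes an immediate operand. A small illustrative sketch under the same option assumptions (function name is hypothetical):

#include <arm_neon.h>

/* Illustrative sketch only: lanes 3..7 of `lo` followed by lanes 0..2
   of `hi`; expected to assemble to "vext.8 dN, dN, dN, #3". */
uint8x8_t shift_window (uint8x8_t lo, uint8x8_t hi)
{
  return vext_u8 (lo, hi, 3);
}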
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_lanef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_lanef32 (void)
+{
+ float32_t out_float32_t;
+ float32x4_t arg0_float32x4_t;
+
+ out_float32_t = vgetq_lane_f32 (arg0_float32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.f32\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanep16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanep16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanep16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanep16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_lanep16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_lanep16 (void)
+{
+ poly16_t out_poly16_t;
+ poly16x8_t arg0_poly16x8_t;
+
+ out_poly16_t = vgetq_lane_p16 (arg0_poly16x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u16\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanep8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanep8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanep8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanep8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_lanep8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_lanep8 (void)
+{
+ poly8_t out_poly8_t;
+ poly8x16_t arg0_poly8x16_t;
+
+ out_poly8_t = vgetq_lane_p8 (arg0_poly8x16_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u8\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_lanes16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_lanes16 (void)
+{
+ int16_t out_int16_t;
+ int16x8_t arg0_int16x8_t;
+
+ out_int16_t = vgetq_lane_s16 (arg0_int16x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.s16\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_lanes32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_lanes32 (void)
+{
+ int32_t out_int32_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_int32_t = vgetq_lane_s32 (arg0_int32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.s32\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_lanes64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_lanes64 (void)
+{
+ int64_t out_int64_t;
+ int64x2_t arg0_int64x2_t;
+
+ out_int64_t = vgetq_lane_s64 (arg0_int64x2_t, 0);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[rR\]\[0-9\]+, \[rR\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_lanes8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_lanes8 (void)
+{
+ int8_t out_int8_t;
+ int8x16_t arg0_int8x16_t;
+
+ out_int8_t = vgetq_lane_s8 (arg0_int8x16_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.s8\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_laneu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_laneu16 (void)
+{
+ uint16_t out_uint16_t;
+ uint16x8_t arg0_uint16x8_t;
+
+ out_uint16_t = vgetq_lane_u16 (arg0_uint16x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u16\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_laneu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_laneu32 (void)
+{
+ uint32_t out_uint32_t;
+ uint32x4_t arg0_uint32x4_t;
+
+ out_uint32_t = vgetq_lane_u32 (arg0_uint32x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u32\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_laneu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_laneu64 (void)
+{
+ uint64_t out_uint64_t;
+ uint64x2_t arg0_uint64x2_t;
+
+ out_uint64_t = vgetq_lane_u64 (arg0_uint64x2_t, 0);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[rR\]\[0-9\]+, \[rR\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vgetQ_laneu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vgetQ_laneu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vgetQ_laneu8 (void)
+{
+ uint8_t out_uint8_t;
+ uint8x16_t arg0_uint8x16_t;
+
+ out_uint8_t = vgetq_lane_u8 (arg0_uint8x16_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u8\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
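
The vgetQ_lane* tests above cover lane extraction from 128-bit vectors; their scan-assembler patterns expect a vmov from a D register with a lane index, or the two-register vmov form for 64-bit lanes. An illustrative sketch under the same assumptions (function name is hypothetical):

#include <arm_neon.h>

/* Illustrative sketch only: extract lane 3 of a Q-register vector;
   expected to assemble to "vmov.s16 rN, dN[lane]" as in the s16 test. */
int16_t pick_lane (int16x8_t v)
{
  return vgetq_lane_s16 (v, 3);
}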
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highf32 (void)
+{
+ float32x2_t out_float32x2_t;
+ float32x4_t arg0_float32x4_t;
+
+ out_float32x2_t = vget_high_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highp16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highp16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highp16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highp16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highp16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highp16 (void)
+{
+ poly16x4_t out_poly16x4_t;
+ poly16x8_t arg0_poly16x8_t;
+
+ out_poly16x4_t = vget_high_p16 (arg0_poly16x8_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highp8 (void)
+{
+ poly8x8_t out_poly8x8_t;
+ poly8x16_t arg0_poly8x16_t;
+
+ out_poly8x8_t = vget_high_p8 (arg0_poly8x16_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highs16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x8_t arg0_int16x8_t;
+
+ out_int16x4_t = vget_high_s16 (arg0_int16x8_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highs32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_int32x2_t = vget_high_s32 (arg0_int32x4_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highs64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highs64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x2_t arg0_int64x2_t;
+
+ out_int64x1_t = vget_high_s64 (arg0_int64x2_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highs8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x16_t arg0_int8x16_t;
+
+ out_int8x8_t = vget_high_s8 (arg0_int8x16_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x8_t arg0_uint16x8_t;
+
+ out_uint16x4_t = vget_high_u16 (arg0_uint16x8_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x4_t arg0_uint32x4_t;
+
+ out_uint32x2_t = vget_high_u32 (arg0_uint32x4_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x2_t arg0_uint64x2_t;
+
+ out_uint64x1_t = vget_high_u64 (arg0_uint64x2_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_highu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,19 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_highu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_highu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x16_t arg0_uint8x16_t;
+
+ out_uint8x8_t = vget_high_u8 (arg0_uint8x16_t);
+}
+
+/* { dg-final { cleanup-saved-temps } } */
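
The vget_high_* intrinsics exercised above simply return the upper 64-bit half of a 128-bit vector as a 64-bit vector of the same element type. As a point of reference only (this is illustrative and not part of the committed patch; it assumes the usual NEON lane ordering in which lane 0 is the least significant), the behaviour can be modelled in plain C:

#include <stdint.h>
#include <stdio.h>

/* Plain-C model of vget_high_s16: copy lanes 4..7 of an int16x8_t
   into the 4 lanes of the int16x4_t result.  */
static void model_vget_high_s16 (const int16_t in[8], int16_t out[4])
{
  int i;
  for (i = 0; i < 4; i++)
    out[i] = in[i + 4];
}

int main (void)
{
  int16_t q[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  int16_t d[4];
  model_vget_high_s16 (q, d);
  printf ("%d %d %d %d\n", d[0], d[1], d[2], d[3]);  /* prints: 4 5 6 7 */
  return 0;
}

Because the result is just a subregister of the input Q register, these tests only check that the file assembles; unlike the vget_lane and vget_low tests below, they carry no scan-assembler pattern in this revision.
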
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanef32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanef32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanef32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanef32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lanef32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lanef32 (void)
+{
+ float32_t out_float32_t;
+ float32x2_t arg0_float32x2_t;
+
+ out_float32_t = vget_lane_f32 (arg0_float32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.f32\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanep16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanep16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanep16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanep16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lanep16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lanep16 (void)
+{
+ poly16_t out_poly16_t;
+ poly16x4_t arg0_poly16x4_t;
+
+ out_poly16_t = vget_lane_p16 (arg0_poly16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u16\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanep8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanep8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanep8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanep8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lanep8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lanep8 (void)
+{
+ poly8_t out_poly8_t;
+ poly8x8_t arg0_poly8x8_t;
+
+ out_poly8_t = vget_lane_p8 (arg0_poly8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u8\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lanes16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lanes16 (void)
+{
+ int16_t out_int16_t;
+ int16x4_t arg0_int16x4_t;
+
+ out_int16_t = vget_lane_s16 (arg0_int16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.s16\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lanes32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lanes32 (void)
+{
+ int32_t out_int32_t;
+ int32x2_t arg0_int32x2_t;
+
+ out_int32_t = vget_lane_s32 (arg0_int32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.s32\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lanes64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lanes64 (void)
+{
+ int64_t out_int64_t;
+ int64x1_t arg0_int64x1_t;
+
+ out_int64_t = vget_lane_s64 (arg0_int64x1_t, 0);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[rR\]\[0-9\]+, \[rR\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lanes8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lanes8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lanes8 (void)
+{
+ int8_t out_int8_t;
+ int8x8_t arg0_int8x8_t;
+
+ out_int8_t = vget_lane_s8 (arg0_int8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.s8\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_laneu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_laneu16 (void)
+{
+ uint16_t out_uint16_t;
+ uint16x4_t arg0_uint16x4_t;
+
+ out_uint16_t = vget_lane_u16 (arg0_uint16x4_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u16\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_laneu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_laneu32 (void)
+{
+ uint32_t out_uint32_t;
+ uint32x2_t arg0_uint32x2_t;
+
+ out_uint32_t = vget_lane_u32 (arg0_uint32x2_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u32\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_laneu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_laneu64 (void)
+{
+ uint64_t out_uint64_t;
+ uint64x1_t arg0_uint64x1_t;
+
+ out_uint64_t = vget_lane_u64 (arg0_uint64x1_t, 0);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[rR\]\[0-9\]+, \[rR\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_laneu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_laneu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_laneu8 (void)
+{
+ uint8_t out_uint8_t;
+ uint8x8_t arg0_uint8x8_t;
+
+ out_uint8_t = vget_lane_u8 (arg0_uint8x8_t, 1);
+}
+
+/* { dg-final { scan-assembler "vmov\.u8\[ \]+\[rR\]\[0-9\]+, \[dD\]\[0-9\]+\\\[\[0-9\]+\\\]!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
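
The vget_lane_* intrinsics tested above extract a single scalar lane from a 64-bit vector, which is why the scan-assembler patterns look for a vmov from a D-register lane into a core register; the 64-bit variants (vget_lane_s64/u64) use lane 0 and match a plain "vmov r, r, d" instead, since the whole D register is moved to a core-register pair. A plain-C sketch of the semantics (illustrative only, not part of this patch) is:

#include <stdint.h>
#include <stdio.h>

/* Plain-C model of vget_lane_s16 (v, lane): return lane `lane' of a
   4-lane int16 vector, for a constant lane index in 0..3.  */
static int16_t model_vget_lane_s16 (const int16_t v[4], int lane)
{
  return v[lane];
}

int main (void)
{
  int16_t d[4] = { 10, 20, 30, 40 };
  printf ("%d\n", model_vget_lane_s16 (d, 1));  /* prints: 20 */
  return 0;
}
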
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowf32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowf32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowf32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowf32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lowf32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lowf32 (void)
+{
+ float32x2_t out_float32x2_t;
+ float32x4_t arg0_float32x4_t;
+
+ out_float32x2_t = vget_low_f32 (arg0_float32x4_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowp16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowp16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowp16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowp16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lowp16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lowp16 (void)
+{
+ poly16x4_t out_poly16x4_t;
+ poly16x8_t arg0_poly16x8_t;
+
+ out_poly16x4_t = vget_low_p16 (arg0_poly16x8_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowp8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowp8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowp8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowp8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lowp8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lowp8 (void)
+{
+ poly8x8_t out_poly8x8_t;
+ poly8x16_t arg0_poly8x16_t;
+
+ out_poly8x8_t = vget_low_p8 (arg0_poly8x16_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lows16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lows16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x8_t arg0_int16x8_t;
+
+ out_int16x4_t = vget_low_s16 (arg0_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lows32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lows32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x4_t arg0_int32x4_t;
+
+ out_int32x2_t = vget_low_s32 (arg0_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lows64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lows64 (void)
+{
+ int64x1_t out_int64x1_t;
+ int64x2_t arg0_int64x2_t;
+
+ out_int64x1_t = vget_low_s64 (arg0_int64x2_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lows8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lows8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lows8 (void)
+{
+ int8x8_t out_int8x8_t;
+ int8x16_t arg0_int8x16_t;
+
+ out_int8x8_t = vget_low_s8 (arg0_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lowu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lowu16 (void)
+{
+ uint16x4_t out_uint16x4_t;
+ uint16x8_t arg0_uint16x8_t;
+
+ out_uint16x4_t = vget_low_u16 (arg0_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lowu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lowu32 (void)
+{
+ uint32x2_t out_uint32x2_t;
+ uint32x4_t arg0_uint32x4_t;
+
+ out_uint32x2_t = vget_low_u32 (arg0_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu64.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu64.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu64.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu64.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lowu64' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lowu64 (void)
+{
+ uint64x1_t out_uint64x1_t;
+ uint64x2_t arg0_uint64x2_t;
+
+ out_uint64x1_t = vget_low_u64 (arg0_uint64x2_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vget_lowu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,20 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vget_lowu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vget_lowu8 (void)
+{
+ uint8x8_t out_uint8x8_t;
+ uint8x16_t arg0_uint8x16_t;
+
+ out_uint8x8_t = vget_low_u8 (arg0_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vmov\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
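
The vget_low_* intrinsics tested above are the counterpart of vget_high_*: they return the lower 64-bit half of a 128-bit vector. In this revision the generated code copies that half out with a D-to-D move, hence the "vmov d, d" scan-assembler patterns. A plain-C sketch of the semantics (illustrative only, not part of this patch; again assuming lane 0 is the least significant) is:

#include <stdint.h>
#include <stdio.h>

/* Plain-C model of vget_low_s16: copy lanes 0..3 of an int16x8_t
   into the 4 lanes of the int16x4_t result.  */
static void model_vget_low_s16 (const int16_t in[8], int16_t out[4])
{
  int i;
  for (i = 0; i < 4; i++)
    out[i] = in[i];
}

int main (void)
{
  int16_t q[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  int16_t d[4];
  model_vget_low_s16 (q, d);
  printf ("%d %d %d %d\n", d[0], d[1], d[2], d[3]);  /* prints: 0 1 2 3 */
  return 0;
}
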
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vhaddQs16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vhaddQs16 (void)
+{
+ int16x8_t out_int16x8_t;
+ int16x8_t arg0_int16x8_t;
+ int16x8_t arg1_int16x8_t;
+
+ out_int16x8_t = vhaddq_s16 (arg0_int16x8_t, arg1_int16x8_t);
+}
+
+/* { dg-final { scan-assembler "vhadd\.s16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vhaddQs32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vhaddQs32 (void)
+{
+ int32x4_t out_int32x4_t;
+ int32x4_t arg0_int32x4_t;
+ int32x4_t arg1_int32x4_t;
+
+ out_int32x4_t = vhaddq_s32 (arg0_int32x4_t, arg1_int32x4_t);
+}
+
+/* { dg-final { scan-assembler "vhadd\.s32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQs8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vhaddQs8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vhaddQs8 (void)
+{
+ int8x16_t out_int8x16_t;
+ int8x16_t arg0_int8x16_t;
+ int8x16_t arg1_int8x16_t;
+
+ out_int8x16_t = vhaddq_s8 (arg0_int8x16_t, arg1_int8x16_t);
+}
+
+/* { dg-final { scan-assembler "vhadd\.s8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vhaddQu16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vhaddQu16 (void)
+{
+ uint16x8_t out_uint16x8_t;
+ uint16x8_t arg0_uint16x8_t;
+ uint16x8_t arg1_uint16x8_t;
+
+ out_uint16x8_t = vhaddq_u16 (arg0_uint16x8_t, arg1_uint16x8_t);
+}
+
+/* { dg-final { scan-assembler "vhadd\.u16\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vhaddQu32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vhaddQu32 (void)
+{
+ uint32x4_t out_uint32x4_t;
+ uint32x4_t arg0_uint32x4_t;
+ uint32x4_t arg1_uint32x4_t;
+
+ out_uint32x4_t = vhaddq_u32 (arg0_uint32x4_t, arg1_uint32x4_t);
+}
+
+/* { dg-final { scan-assembler "vhadd\.u32\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu8.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu8.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu8.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhaddQu8.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vhaddQu8' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vhaddQu8 (void)
+{
+ uint8x16_t out_uint8x16_t;
+ uint8x16_t arg0_uint8x16_t;
+ uint8x16_t arg1_uint8x16_t;
+
+ out_uint8x16_t = vhaddq_u8 (arg0_uint8x16_t, arg1_uint8x16_t);
+}
+
+/* { dg-final { scan-assembler "vhadd\.u8\[ \]+\[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+, \[qQ\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhadds16.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhadds16.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhadds16.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhadds16.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vhadds16' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vhadds16 (void)
+{
+ int16x4_t out_int16x4_t;
+ int16x4_t arg0_int16x4_t;
+ int16x4_t arg1_int16x4_t;
+
+ out_int16x4_t = vhadd_s16 (arg0_int16x4_t, arg1_int16x4_t);
+}
+
+/* { dg-final { scan-assembler "vhadd\.s16\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
Added: llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhadds32.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhadds32.c?rev=76781&view=auto
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhadds32.c (added)
+++ llvm-gcc-4.2/trunk/gcc/testsuite/gcc.target/arm/neon/vhadds32.c Wed Jul 22 15:36:27 2009
@@ -0,0 +1,21 @@
+/* APPLE LOCAL file v7 merge */
+/* Test the `vhadds32' ARM Neon intrinsic. */
+/* This file was autogenerated by neon-testgen. */
+
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-options "-save-temps -O0 -mfpu=neon -mfloat-abi=softfp" } */
+
+#include "arm_neon.h"
+
+void test_vhadds32 (void)
+{
+ int32x2_t out_int32x2_t;
+ int32x2_t arg0_int32x2_t;
+ int32x2_t arg1_int32x2_t;
+
+ out_int32x2_t = vhadd_s32 (arg0_int32x2_t, arg1_int32x2_t);
+}
+
+/* { dg-final { scan-assembler "vhadd\.s32\[ \]+\[dD\]\[0-9\]+, \[dD\]\[0-9\]+, \[dD\]\[0-9\]+!?\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
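
The vhadd/vhaddq tests above cover the halving-add intrinsics: each pair of lanes is added in a wider intermediate type and the sum is shifted right by one, so the addition cannot overflow. A scalar model of one lane (illustrative only, not part of this patch) is:

#include <stdint.h>
#include <stdio.h>

/* Scalar model of one lane of vhadd_s16: form the sum in 32 bits,
   then arithmetic-shift right by one, so overflow is impossible.  */
static int16_t model_vhadd_s16_lane (int16_t a, int16_t b)
{
  return (int16_t) (((int32_t) a + (int32_t) b) >> 1);
}

int main (void)
{
  printf ("%d\n", model_vhadd_s16_lane (32767, 32767));  /* prints: 32767 */
  printf ("%d\n", model_vhadd_s16_lane (3, 4));          /* prints: 3 */
  return 0;
}

The vhaddq_* forms are identical in behaviour but operate on 128-bit Q registers, which is why their scan-assembler patterns expect Q-register operands rather than D-register ones.
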