[llvm-commits] [llvm-gcc-4.2] r80638 - in /llvm-gcc-4.2/trunk/gcc/config/arm: arm-modes.def arm.c arm.h arm_neon.h llvm-arm.cpp neon-gen.ml neon.md neon.ml
Anton Korobeynikov
asl at math.spbu.ru
Mon Aug 31 16:16:02 PDT 2009
Author: asl
Date: Mon Aug 31 18:16:02 2009
New Revision: 80638
URL: http://llvm.org/viewvc/llvm-project?rev=80638&view=rev
Log:
Use V1DI mode for v1i64 NEON vectors, not just DI mode.
This fixes more than 150 failures in the gcc NEON testsuite.
Modified:
llvm-gcc-4.2/trunk/gcc/config/arm/arm-modes.def
llvm-gcc-4.2/trunk/gcc/config/arm/arm.c
llvm-gcc-4.2/trunk/gcc/config/arm/arm.h
llvm-gcc-4.2/trunk/gcc/config/arm/arm_neon.h
llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp
llvm-gcc-4.2/trunk/gcc/config/arm/neon-gen.ml
llvm-gcc-4.2/trunk/gcc/config/arm/neon.md
llvm-gcc-4.2/trunk/gcc/config/arm/neon.ml
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/arm-modes.def
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/arm-modes.def?rev=80638&r1=80637&r2=80638&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/arm-modes.def (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/arm-modes.def Mon Aug 31 18:16:02 2009
@@ -58,6 +58,10 @@
VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
VECTOR_MODES (FLOAT, 16); /* V8HF V4SF V2DF */
+/* LLVM LOCAL begin */
+VECTOR_MODE (INT, DI, 1); /* V1DI */
+/* LLVM LOCAL end */
+
/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
/* Opaque integer modes for 3, 4, 6 or 8 Neon double registers (2 is
TImode). */
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/arm.c
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/arm.c?rev=80638&r1=80637&r2=80638&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/arm.c (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/arm.c Mon Aug 31 18:16:02 2009
@@ -16244,7 +16244,9 @@
T_V4HI = 0x0002,
T_V2SI = 0x0004,
T_V2SF = 0x0008,
- T_DI = 0x0010,
+ /* LLVM LOCAL begin */
+ T_V1DI = 0x0010,
+ /* LLVM LOCAL end */
T_V16QI = 0x0020,
T_V8HI = 0x0040,
T_V4SI = 0x0080,
@@ -16259,7 +16261,9 @@
#define v4hi_UP T_V4HI
#define v2si_UP T_V2SI
#define v2sf_UP T_V2SF
-#define di_UP T_DI
+/* LLVM LOCAL begin */
+#define v1di_UP T_V1DI
+/* LLVM LOCAL end */
#define v16qi_UP T_V16QI
#define v8hi_UP T_V8HI
#define v4si_UP T_V4SI
@@ -16389,14 +16393,15 @@
WARNING: Variants should be listed in the same increasing order as
neon_builtin_type_bits. */
+/* LLVM LOCAL begin */
static neon_builtin_datum neon_builtin_data[] =
{
{ VAR10 (BINOP, vadd,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR3 (BINOP, vaddl, v8qi, v4hi, v2si) },
{ VAR3 (BINOP, vaddw, v8qi, v4hi, v2si) },
{ VAR6 (BINOP, vhadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
- { VAR8 (BINOP, vqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, vqadd, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
{ VAR3 (BINOP, vaddhn, v8hi, v4si, v2di) },
{ VAR8 (BINOP, vmul, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
{ VAR8 (TERNOP, vmla, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
@@ -16414,22 +16419,22 @@
{ VAR4 (SCALARMULH, vqdmulh_n, v4hi, v2si, v8hi, v4si) },
{ VAR4 (LANEMULH, vqdmulh_lane, v4hi, v2si, v8hi, v4si) },
{ VAR2 (BINOP, vqdmull, v4hi, v2si) },
- { VAR8 (BINOP, vshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
- { VAR8 (BINOP, vqshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
- { VAR8 (SHIFTIMM, vshr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, vshl, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, vqshl, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTIMM, vshr_n, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
{ VAR3 (SHIFTIMM, vshrn_n, v8hi, v4si, v2di) },
{ VAR3 (SHIFTIMM, vqshrn_n, v8hi, v4si, v2di) },
{ VAR3 (SHIFTIMM, vqshrun_n, v8hi, v4si, v2di) },
- { VAR8 (SHIFTIMM, vshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
- { VAR8 (SHIFTIMM, vqshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
- { VAR8 (SHIFTIMM, vqshlu_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTIMM, vshl_n, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTIMM, vqshl_n, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTIMM, vqshlu_n, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
{ VAR3 (SHIFTIMM, vshll_n, v8qi, v4hi, v2si) },
- { VAR8 (SHIFTACC, vsra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTACC, vsra_n, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
{ VAR10 (BINOP, vsub,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR3 (BINOP, vsubl, v8qi, v4hi, v2si) },
{ VAR3 (BINOP, vsubw, v8qi, v4hi, v2si) },
- { VAR8 (BINOP, vqsub, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (BINOP, vqsub, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
{ VAR6 (BINOP, vhsub, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
{ VAR3 (BINOP, vsubhn, v8hi, v4si, v2di) },
{ VAR8 (BINOP, vceq, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
@@ -16451,8 +16456,8 @@
{ VAR4 (BINOP, vpmin, v8qi, v4hi, v2si, v2sf) },
{ VAR2 (BINOP, vrecps, v2sf, v4sf) },
{ VAR2 (BINOP, vrsqrts, v2sf, v4sf) },
- { VAR8 (SHIFTINSERT, vsri_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
- { VAR8 (SHIFTINSERT, vsli_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTINSERT, vsri_n, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
+ { VAR8 (SHIFTINSERT, vsli_n, v8qi, v4hi, v2si, v1di, v16qi, v8hi, v4si, v2di) },
{ VAR8 (UNOP, vabs, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
{ VAR6 (UNOP, vqabs, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
{ VAR8 (UNOP, vneg, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
@@ -16465,15 +16470,15 @@
{ VAR6 (UNOP, vmvn, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
/* FIXME: vget_lane supports more variants than this! */
{ VAR10 (GETLANE, vget_lane,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (SETLANE, vset_lane,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
- { VAR5 (CREATE, vcreate, v8qi, v4hi, v2si, v2sf, di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR5 (CREATE, vcreate, v8qi, v4hi, v2si, v2sf, v1di) },
{ VAR10 (DUP, vdup_n,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (DUPLANE, vdup_lane,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
- { VAR5 (COMBINE, vcombine, v8qi, v4hi, v2si, v2sf, di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
+ { VAR5 (COMBINE, vcombine, v8qi, v4hi, v2si, v2sf, v1di) },
{ VAR5 (SPLIT, vget_high, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR5 (SPLIT, vget_low, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR3 (UNOP, vmovn, v8hi, v4si, v2di) },
@@ -16495,14 +16500,14 @@
{ VAR2 (SCALARMAC, vmlsl_n, v4hi, v2si) },
{ VAR2 (SCALARMAC, vqdmlsl_n, v4hi, v2si) },
{ VAR10 (BINOP, vext,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR8 (UNOP, vrev64, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
{ VAR4 (UNOP, vrev32, v8qi, v4hi, v16qi, v8hi) },
{ VAR2 (UNOP, vrev16, v8qi, v16qi) },
{ VAR4 (CONVERT, vcvt, v2si, v2sf, v4si, v4sf) },
{ VAR4 (FIXCONV, vcvt_n, v2si, v2sf, v4si, v4sf) },
{ VAR10 (SELECT, vbsl,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR1 (VTBL, vtbl1, v8qi) },
{ VAR1 (VTBL, vtbl2, v8qi) },
{ VAR1 (VTBL, vtbl3, v8qi) },
@@ -16514,64 +16519,65 @@
{ VAR8 (RESULTPAIR, vtrn, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
{ VAR8 (RESULTPAIR, vzip, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
{ VAR8 (RESULTPAIR, vuzp, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
- { VAR5 (REINTERP, vreinterpretv8qi, v8qi, v4hi, v2si, v2sf, di) },
- { VAR5 (REINTERP, vreinterpretv4hi, v8qi, v4hi, v2si, v2sf, di) },
- { VAR5 (REINTERP, vreinterpretv2si, v8qi, v4hi, v2si, v2sf, di) },
- { VAR5 (REINTERP, vreinterpretv2sf, v8qi, v4hi, v2si, v2sf, di) },
- { VAR5 (REINTERP, vreinterpretdi, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (REINTERP, vreinterpretv8qi, v8qi, v4hi, v2si, v2sf, v1di) },
+ { VAR5 (REINTERP, vreinterpretv4hi, v8qi, v4hi, v2si, v2sf, v1di) },
+ { VAR5 (REINTERP, vreinterpretv2si, v8qi, v4hi, v2si, v2sf, v1di) },
+ { VAR5 (REINTERP, vreinterpretv2sf, v8qi, v4hi, v2si, v2sf, v1di) },
+ { VAR5 (REINTERP, vreinterpretv1di, v8qi, v4hi, v2si, v2sf, v1di) },
{ VAR5 (REINTERP, vreinterpretv16qi, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR5 (REINTERP, vreinterpretv8hi, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR5 (REINTERP, vreinterpretv4si, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR5 (REINTERP, vreinterpretv4sf, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR5 (REINTERP, vreinterpretv2di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (LOAD1, vld1,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (LOAD1LANE, vld1_lane,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (LOAD1, vld1_dup,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (STORE1, vst1,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (STORE1LANE, vst1_lane,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR9 (LOADSTRUCT,
- vld2, v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ vld2, v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf) },
{ VAR7 (LOADSTRUCTLANE, vld2_lane,
v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
- { VAR5 (LOADSTRUCT, vld2_dup, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (LOADSTRUCT, vld2_dup, v8qi, v4hi, v2si, v2sf, v1di) },
{ VAR9 (STORESTRUCT, vst2,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf) },
{ VAR7 (STORESTRUCTLANE, vst2_lane,
v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
{ VAR9 (LOADSTRUCT,
- vld3, v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ vld3, v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf) },
{ VAR7 (LOADSTRUCTLANE, vld3_lane,
v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
- { VAR5 (LOADSTRUCT, vld3_dup, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (LOADSTRUCT, vld3_dup, v8qi, v4hi, v2si, v2sf, v1di) },
{ VAR9 (STORESTRUCT, vst3,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf) },
{ VAR7 (STORESTRUCTLANE, vst3_lane,
v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
{ VAR9 (LOADSTRUCT, vld4,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf) },
{ VAR7 (LOADSTRUCTLANE, vld4_lane,
v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
- { VAR5 (LOADSTRUCT, vld4_dup, v8qi, v4hi, v2si, v2sf, di) },
+ { VAR5 (LOADSTRUCT, vld4_dup, v8qi, v4hi, v2si, v2sf, v1di) },
{ VAR9 (STORESTRUCT, vst4,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf) },
{ VAR7 (STORESTRUCTLANE, vst4_lane,
v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
{ VAR10 (LOGICBINOP, vand,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (LOGICBINOP, vorr,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (BINOP, veor,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (LOGICBINOP, vbic,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) },
{ VAR10 (LOGICBINOP, vorn,
- v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) }
+ v8qi, v4hi, v2si, v2sf, v1di, v16qi, v8hi, v4si, v4sf, v2di) }
};
+/* LLVM LOCAL end */
#undef CF
#undef VAR1
@@ -16605,7 +16611,7 @@
/* LLVM LOCAL begin multi-vector types */
#ifdef ENABLE_LLVM
/* Create a new builtin struct type containing NUMVECS fields (where NUMVECS
- is in the range from 2 to 4) of type VECTYPE. */
+ is in the range from 1 to 4) of type VECTYPE. */
static tree
build_multivec_type (tree vectype, unsigned numvecs, const char *tag)
{
@@ -16617,7 +16623,7 @@
name = build_decl (TYPE_DECL, get_identifier (tag), record);
TYPE_NAME (record) = name;
- gcc_assert (numvecs >= 2 && numvecs <= 4);
+ gcc_assert (numvecs >= 1 && numvecs <= 4);
fields = NULL;
for (n = 0; n < numvecs; ++n)
{
@@ -16671,6 +16677,9 @@
#define v8hi_TN V8HI_type_node
#define v4si_TN V4SI_type_node
#define v4sf_TN V4SF_type_node
+/* LLVM LOCAL begin */
+#define v1di_TN V1DI_type_node
+/* LLVM LOCAL end */
#define v2di_TN V2DI_type_node
/* LLVM LOCAL begin multi-vector types */
@@ -16679,13 +16688,14 @@
#define pv4hi_TN V4HI2_type_node
#define pv2si_TN V2SI2_type_node
#define pv2sf_TN V2SF2_type_node
-#define pdi_TN DI2_type_node
+#define pdi_TN DI1_type_node
#define pv16qi_TN V16QI2_type_node
#define pv8hi_TN V8HI2_type_node
#define pv4si_TN V4SI2_type_node
#define pv4sf_TN V4SF2_type_node
#define pv2di_TN V2DI2_type_node
+#define pv1di_TN V1DI2_type_node
#else /* !ENABLE_LLVM */
#define pv8qi_TN V8QI_pointer_node
#define pv4hi_TN V4HI_pointer_node
@@ -16775,6 +16785,11 @@
build_vector_type_for_mode (neon_intSI_type_node, V2SImode);
tree V2SF_type_node =
build_vector_type_for_mode (neon_float_type_node, V2SFmode);
+ /* LLVM LOCAL begin */
+ tree V1DI_type_node =
+ build_vector_type_for_mode (neon_intDI_type_node, V1DImode);
+ /* LLVM LOCAL end */
+
/* 128-bit vectors. */
tree V16QI_type_node =
build_vector_type_for_mode (neon_intQI_type_node, V16QImode);
@@ -16801,6 +16816,10 @@
"__builtin_neon_v4hi2");
tree V2SI2_type_node = build_multivec_type (V2SI_type_node, 2,
"__builtin_neon_v2si2");
+ tree V1DI2_type_node = build_multivec_type (V1DI_type_node, 1,
+ "__builtin_neon_v1di2");
+ tree DI1_type_node = build_multivec_type (neon_intDI_type_node, 1,
+ "__builtin_neon_di1");
tree DI2_type_node = build_multivec_type (neon_intDI_type_node, 2,
"__builtin_neon_di2");
tree V2SF2_type_node = build_multivec_type (V2SF_type_node, 2,
@@ -16899,13 +16918,14 @@
#endif /* ENABLE_LLVM */
/* LLVM LOCAL end multi-vector types */
+ /* LLVM LOCAL begin */
/* Binops, all-doubleword arithmetic. */
TYPE4 (v8qi, v8qi, v8qi, si);
TYPE4 (v4hi, v4hi, v4hi, si);
TYPE4 (v2si, v2si, v2si, si);
TYPE4 (v2sf, v2sf, v2sf, si);
- TYPE4 (di, di, di, si);
-
+ TYPE4 (v1di, v1di, v1di, si);
+
/* Binops, all-quadword arithmetic. */
TYPE4 (v16qi, v16qi, v16qi, si);
TYPE4 (v8hi, v8hi, v8hi, si);
@@ -16936,7 +16956,7 @@
/* Binops, dest and first operand elements wider (vpadal). */
TYPE4 (v4hi, v4hi, v8qi, si);
TYPE4 (v2si, v2si, v4hi, si);
- TYPE4 (di, di, v2si, si);
+ TYPE4 (v1di, v1di, v2si, si);
TYPE4 (v8hi, v8hi, v16qi, si);
TYPE4 (v4si, v4si, v8hi, si);
TYPE4 (v2di, v2di, v4si, si);
@@ -16964,7 +16984,7 @@
TYPE3 (v4hi, v4hi, si);
TYPE3 (v2si, v2si, si);
TYPE3 (v2sf, v2sf, si);
- TYPE3 (di, di, si);
+ TYPE3 (v1di, v1di, si);
/* Unops, all-quadword arithmetic. */
TYPE3 (v16qi, v16qi, si);
@@ -16986,7 +17006,7 @@
/* Unops, dest elements wider (vpaddl). */
TYPE3 (v4hi, v8qi, si);
TYPE3 (v2si, v4hi, si);
- TYPE3 (di, v2si, si);
+ TYPE3 (v1di, v2si, si);
TYPE3 (v8hi, v16qi, si);
TYPE3 (v4si, v8hi, si);
TYPE3 (v2di, v4si, si);
@@ -16996,7 +17016,7 @@
TYPE4 (hi, v4hi, si, si);
TYPE4 (si, v2si, si, si);
TYPE4 (sf, v2sf, si, si);
- TYPE4 (di, di, si, si);
+ TYPE4 (di, v1di, si, si);
/* Get-lane from quadword insns. */
TYPE4 (qi, v16qi, si, si);
@@ -17010,6 +17030,7 @@
TYPE4 (v4hi, hi, v4hi, si);
TYPE4 (v2si, si, v2si, si);
TYPE4 (v2sf, sf, v2sf, si);
+ TYPE4 (v1di, di, v1di, si);
/* Set lane in quadword insns. */
TYPE4 (v16qi, qi, v16qi, si);
@@ -17023,7 +17044,7 @@
TYPE2 (v4hi, di);
TYPE2 (v2si, di);
TYPE2 (v2sf, di);
- TYPE2 (di, di);
+ TYPE2 (v1di, di);
/* Duplicate an ARM register into lanes of a vector. */
TYPE2 (v8qi, qi);
@@ -17042,21 +17063,21 @@
TYPE3 (v8hi, v4hi, si);
TYPE3 (v4si, v2si, si);
TYPE3 (v4sf, v2sf, si);
- TYPE3 (v2di, di, si);
+ TYPE3 (v2di, v1di, si);
/* Combine doubleword vectors into quadword vectors. */
TYPE3 (v16qi, v8qi, v8qi);
TYPE3 (v8hi, v4hi, v4hi);
TYPE3 (v4si, v2si, v2si);
TYPE3 (v4sf, v2sf, v2sf);
- TYPE3 (v2di, di, di);
+ TYPE3 (v2di, v1di, v1di);
/* Split quadword vectors into high or low parts. */
TYPE2 (v8qi, v16qi);
TYPE2 (v4hi, v8hi);
TYPE2 (v2si, v4si);
TYPE2 (v2sf, v4sf);
- TYPE2 (di, v2di);
+ TYPE2 (v1di, v2di);
/* Conversions, int<->float. */
TYPE3 (v2si, v2sf, si);
@@ -17123,7 +17144,7 @@
TYPE4 (v4hi, v4hi, v4hi, v4hi);
TYPE4 (v2si, v2si, v2si, v2si);
TYPE4 (v2sf, v2si, v2sf, v2sf);
- TYPE4 (di, di, di, di);
+ TYPE4 (v1di, v1di, v1di, v1di);
TYPE4 (v16qi, v16qi, v16qi, v16qi);
TYPE4 (v8hi, v8hi, v8hi, v8hi);
@@ -17134,6 +17155,7 @@
/* Shift immediate operations. */
TYPE4 (v8qi, v8qi, si, si);
TYPE4 (v4hi, v4hi, si, si);
+ TYPE4 (v1di, v1di, si, si);
TYPE4 (v16qi, v16qi, si, si);
TYPE4 (v8hi, v8hi, si, si);
@@ -17151,12 +17173,13 @@
/* Shift + accumulate operations. */
TYPE5 (v8qi, v8qi, v8qi, si, si);
- TYPE5 (di, di, di, si, si);
+ TYPE5 (v1di, v1di, v1di, si, si);
TYPE5 (v16qi, v16qi, v16qi, si, si);
TYPE5 (v8hi, v8hi, v8hi, si, si);
TYPE5 (v4sf, v4sf, v4sf, si, si);
TYPE5 (v2di, v2di, v2di, si, si);
+ /* LLVM LOCAL end */
/* Operations which return results as pairs. */
/* LLVM LOCAL begin multi-vector types */
@@ -17164,7 +17187,7 @@
TYPE4_RESULTPAIR (void, pv4hi, v4hi, v4hi);
TYPE4_RESULTPAIR (void, pv2si, v2si, v2si);
TYPE4_RESULTPAIR (void, pv2sf, v2sf, v2sf);
- TYPE4_RESULTPAIR (void, pdi, di, di);
+ TYPE4_RESULTPAIR (void, pv1di, v1di, v1di);
TYPE4_RESULTPAIR (void, pv16qi, v16qi, v16qi);
TYPE4_RESULTPAIR (void, pv8hi, v8hi, v8hi);
@@ -17192,8 +17215,10 @@
build_function_type_list (V4HI_type_node, const_intHI_pointer_node, NULL);
tree v2si_ftype_const_si_pointer =
build_function_type_list (V2SI_type_node, const_intSI_pointer_node, NULL);
- tree di_ftype_const_di_pointer =
- build_function_type_list (intDI_type_node, const_intDI_pointer_node, NULL);
+ /* LLVM LOCAL begin */
+ tree v1di_ftype_const_di_pointer =
+ build_function_type_list (V1DI_type_node, const_intDI_pointer_node, NULL);
+ /* LLVM LOCAL end */
tree v2sf_ftype_const_sf_pointer =
build_function_type_list (V2SF_type_node, const_float_pointer_node, NULL);
@@ -17219,9 +17244,11 @@
tree v2si_ftype_const_si_pointer_v2si_si =
build_function_type_list (V2SI_type_node, const_intSI_pointer_node,
V2SI_type_node, intSI_type_node, NULL);
- tree di_ftype_const_di_pointer_di_si =
- build_function_type_list (intDI_type_node, const_intDI_pointer_node,
- intDI_type_node, intSI_type_node, NULL);
+ /* LLVM LOCAL begin */
+ tree v1di_ftype_const_di_pointer_v1di_si =
+ build_function_type_list (V1DI_type_node, const_intDI_pointer_node,
+ V1DI_type_node, intSI_type_node, NULL);
+ /* LLVM LOCAL end */
tree v2sf_ftype_const_sf_pointer_v2sf_si =
build_function_type_list (V2SF_type_node, const_float_pointer_node,
V2SF_type_node, intSI_type_node, NULL);
@@ -17253,9 +17280,11 @@
tree void_ftype_si_pointer_v2si =
build_function_type_list (void_type_node, intSI_pointer_node,
V2SI_type_node, NULL);
- tree void_ftype_di_pointer_di =
+ /* LLVM LOCAL begin */
+ tree void_ftype_di_pointer_v1di =
build_function_type_list (void_type_node, intDI_pointer_node,
- intDI_type_node, NULL);
+ V1DI_type_node, NULL);
+ /* LLVM LOCAL end */
tree void_ftype_sf_pointer_v2sf =
build_function_type_list (void_type_node, float_pointer_node,
V2SF_type_node, NULL);
@@ -17287,9 +17316,11 @@
tree void_ftype_si_pointer_v2si_si =
build_function_type_list (void_type_node, intSI_pointer_node,
V2SI_type_node, intSI_type_node, NULL);
- tree void_ftype_di_pointer_di_si =
+ /* LLVM LOCAL begin */
+ tree void_ftype_di_pointer_v1di_si =
build_function_type_list (void_type_node, intDI_pointer_node,
- intDI_type_node, intSI_type_node, NULL);
+ V1DI_type_node, intSI_type_node, NULL);
+ /* LLVM LOCAL end */
tree void_ftype_sf_pointer_v2sf_si =
build_function_type_list (void_type_node, float_pointer_node,
V2SF_type_node, intSI_type_node, NULL);
@@ -17678,6 +17709,8 @@
"__builtin_neon_v4hi2");
(*lang_hooks.types.register_builtin_type) (V2SI2_type_node,
"__builtin_neon_v2si2");
+ (*lang_hooks.types.register_builtin_type) (DI1_type_node,
+ "__builtin_neon_di1");
(*lang_hooks.types.register_builtin_type) (DI2_type_node,
"__builtin_neon_di2");
(*lang_hooks.types.register_builtin_type) (V2SF2_type_node,
@@ -17733,7 +17766,9 @@
dreg_types[1] = V4HI_type_node;
dreg_types[2] = V2SI_type_node;
dreg_types[3] = V2SF_type_node;
- dreg_types[4] = neon_intDI_type_node;
+ /* LLVM LOCAL begin */
+ dreg_types[4] = V1DI_type_node;
+ /* LLVM LOCAL end */
qreg_types[0] = V16QI_type_node;
qreg_types[1] = V8HI_type_node;
@@ -17762,10 +17797,12 @@
for (j = 0; j < T_MAX; j++)
{
+ /* LLVM LOCAL begin */
const char* const modenames[] = {
- "v8qi", "v4hi", "v2si", "v2sf", "di",
+ "v8qi", "v4hi", "v2si", "v2sf", "v1di",
"v16qi", "v8hi", "v4si", "v4sf", "v2di"
};
+ /* LLVM LOCAL end */
char namebuf[60];
tree ftype = NULL;
enum insn_code icode;
@@ -17821,12 +17858,14 @@
ftype = v2sf_ftype_v2sf_si;
break;
- case DImode:
- if (mode0 == DImode)
- ftype = di_ftype_di_si;
+ /* LLVM LOCAL begin */
+ case V1DImode:
+ if (mode0 == V1DImode)
+ ftype = v1di_ftype_v1di_si;
else if (mode0 == V2SImode)
- ftype = di_ftype_v2si_si;
+ ftype = v1di_ftype_v2si_si;
break;
+ /* LLVM LOCAL end */
case V16QImode:
if (mode0 == V16QImode)
@@ -17911,12 +17950,14 @@
ftype = v2sf_ftype_v2sf_v2sf_si;
break;
- case DImode:
- if (mode0 == DImode && mode1 == DImode)
- ftype = di_ftype_di_di_si;
- else if (mode0 == DImode && mode1 == V2SImode)
- ftype = di_ftype_di_v2si_si;
+ /* LLVM LOCAL begin */
+ case V1DImode:
+ if (mode0 == V1DImode && mode1 == V1DImode)
+ ftype = v1di_ftype_v1di_v1di_si;
+ else if (mode0 == V1DImode && mode1 == V2SImode)
+ ftype = v1di_ftype_v1di_v2si_si;
break;
+ /* LLVM LOCAL end */
case V16QImode:
if (mode0 == V16QImode && mode1 == V16QImode)
@@ -18045,7 +18086,7 @@
case NEON_GETLANE:
/* Vector lane extraction. */
- gcc_assert (valid_neon_mode (mode0) && mode1 == SImode
+ gcc_assert (valid_neon_mode (mode0) && mode1 == SImode
&& mode2 == SImode);
switch (tmode)
{
@@ -18077,12 +18118,14 @@
ftype = sf_ftype_v4sf_si_si;
break;
+ /* LLVM LOCAL begin */
case DImode:
- if (mode0 == DImode)
- ftype = di_ftype_di_si_si;
+ if (mode0 == V1DImode)
+ ftype = di_ftype_v1di_si_si;
else if (mode0 == V2DImode)
ftype = di_ftype_v2di_si_si;
break;
+ /* LLVM LOCAL end */
default:
gcc_unreachable ();
@@ -18115,10 +18158,12 @@
ftype = v2sf_ftype_sf_v2sf_si;
break;
- case DImode:
- if (mode0 == DImode && mode1 == DImode)
- ftype = di_ftype_di_di_si;
+ /* LLVM LOCAL begin */
+ case V1DImode:
+ if (mode0 == DImode && mode1 == V1DImode)
+ ftype = v1di_ftype_di_v1di_si;
break;
+ /* LLVM LOCAL end */
case V16QImode:
if (mode0 == QImode && mode1 == V16QImode)
@@ -18160,13 +18205,15 @@
case V4HImode: ftype = v4hi_ftype_di; break;
case V2SImode: ftype = v2si_ftype_di; break;
case V2SFmode: ftype = v2sf_ftype_di; break;
- case DImode: ftype = di_ftype_di; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = v1di_ftype_di; break;
+ /* LLVM LOCAL end */
default: gcc_unreachable ();
}
break;
case NEON_DUP:
- gcc_assert ((mode0 == DImode && tmode == DImode)
+ gcc_assert ((mode0 == DImode && tmode == V1DImode)
|| mode0 == GET_MODE_INNER (tmode));
switch (tmode)
{
@@ -18174,7 +18221,9 @@
case V4HImode: ftype = v4hi_ftype_hi; break;
case V2SImode: ftype = v2si_ftype_si; break;
case V2SFmode: ftype = v2sf_ftype_sf; break;
- case DImode: ftype = di_ftype_di; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = v1di_ftype_di; break;
+ /* LLVM LOCAL end */
case V16QImode: ftype = v16qi_ftype_qi; break;
case V8HImode: ftype = v8hi_ftype_hi; break;
case V4SImode: ftype = v4si_ftype_si; break;
@@ -18192,12 +18241,16 @@
case V4HImode: ftype = v4hi_ftype_v4hi_si; break;
case V2SImode: ftype = v2si_ftype_v2si_si; break;
case V2SFmode: ftype = v2sf_ftype_v2sf_si; break;
- case DImode: ftype = di_ftype_di_si; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = v1di_ftype_v1di_si; break;
+ /* LLVM LOCAL end */
case V16QImode: ftype = v16qi_ftype_v8qi_si; break;
case V8HImode: ftype = v8hi_ftype_v4hi_si; break;
case V4SImode: ftype = v4si_ftype_v2si_si; break;
case V4SFmode: ftype = v4sf_ftype_v2sf_si; break;
- case V2DImode: ftype = v2di_ftype_di_si; break;
+ /* LLVM LOCAL begin */
+ case V2DImode: ftype = v2di_ftype_v1di_si; break;
+ /* LLVM LOCAL end */
default: gcc_unreachable ();
}
break;
@@ -18227,10 +18280,12 @@
ftype = v2si_ftype_v2di_si_si;
break;
- case DImode:
- if (mode0 == DImode)
- ftype = di_ftype_di_si_si;
+ /* LLVM LOCAL begin */
+ case V1DImode:
+ if (mode0 == V1DImode)
+ ftype = v1di_ftype_v1di_si_si;
break;
+ /* LLVM LOCAL end */
case V16QImode:
if (mode0 == V16QImode)
@@ -18271,7 +18326,9 @@
case V4HImode: ftype = v4hi_ftype_v4hi_v4hi_si_si; break;
case V2SImode: ftype = v2si_ftype_v2si_v2si_si_si; break;
case V2SFmode: ftype = v2sf_ftype_v2sf_v2sf_si_si; break;
- case DImode: ftype = di_ftype_di_di_si_si; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = v1di_ftype_v1di_v1di_si_si; break;
+ /* LLVM LOCAL end */
case V16QImode: ftype = v16qi_ftype_v16qi_v16qi_si_si; break;
case V8HImode: ftype = v8hi_ftype_v8hi_v8hi_si_si; break;
case V4SImode: ftype = v4si_ftype_v4si_v4si_si_si; break;
@@ -18304,11 +18361,13 @@
if (mode0 == V2SFmode && mode1 == V2SFmode)
ftype = v4sf_ftype_v2sf_v2sf;
break;
-
+
+ /* LLVM LOCAL begin */
case V2DImode:
- if (mode0 == DImode && mode1 == DImode)
- ftype = v2di_ftype_di_di;
+ if (mode0 == V1DImode && mode1 == V1DImode)
+ ftype = v2di_ftype_v1di_v1di;
break;
+ /* LLVM LOCAL end */
default:
gcc_unreachable ();
@@ -18339,10 +18398,12 @@
ftype = v2sf_ftype_v4sf;
break;
- case DImode:
+ /* LLVM LOCAL begin */
+ case V1DImode:
if (mode0 == V2DImode)
- ftype = di_ftype_v2di;
+ ftype = v1di_ftype_v2di;
break;
+ /* LLVM LOCAL end */
default:
gcc_unreachable ();
@@ -18711,7 +18772,9 @@
case V4HImode: ftype = v4hi_ftype_v4hi_v4hi_v4hi; break;
case V2SImode: ftype = v2si_ftype_v2si_v2si_v2si; break;
case V2SFmode: ftype = v2sf_ftype_v2si_v2sf_v2sf; break;
- case DImode: ftype = di_ftype_di_di_di; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = v1di_ftype_v1di_v1di_v1di; break;
+ /* LLVM LOCAL end */
case V16QImode: ftype = v16qi_ftype_v16qi_v16qi_v16qi; break;
case V8HImode: ftype = v8hi_ftype_v8hi_v8hi_v8hi; break;
case V4SImode: ftype = v4si_ftype_v4si_v4si_v4si; break;
@@ -18758,7 +18821,9 @@
case V4HImode: ftype = void_ftype_pv4hi_v4hi_v4hi; break;
case V2SImode: ftype = void_ftype_pv2si_v2si_v2si; break;
case V2SFmode: ftype = void_ftype_pv2sf_v2sf_v2sf; break;
- case DImode: ftype = void_ftype_pdi_di_di; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = void_ftype_pv1di_v1di_v1di; break;
+ /* LLVM LOCAL end */
case V16QImode: ftype = void_ftype_pv16qi_v16qi_v16qi; break;
case V8HImode: ftype = void_ftype_pv8hi_v8hi_v8hi; break;
case V4SImode: ftype = void_ftype_pv4si_v4si_v4si; break;
@@ -18780,7 +18845,9 @@
case V4HImode: ftype = reinterp_ftype_dreg[1][rhs]; break;
case V2SImode: ftype = reinterp_ftype_dreg[2][rhs]; break;
case V2SFmode: ftype = reinterp_ftype_dreg[3][rhs]; break;
- case DImode: ftype = reinterp_ftype_dreg[4][rhs]; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = reinterp_ftype_dreg[4][rhs]; break;
+ /* LLVM LOCAL end */
case V16QImode: ftype = reinterp_ftype_qreg[0][rhs]; break;
case V8HImode: ftype = reinterp_ftype_qreg[1][rhs]; break;
case V4SImode: ftype = reinterp_ftype_qreg[2][rhs]; break;
@@ -18798,7 +18865,9 @@
case V4HImode: ftype = v4hi_ftype_const_hi_pointer; break;
case V2SImode: ftype = v2si_ftype_const_si_pointer; break;
case V2SFmode: ftype = v2sf_ftype_const_sf_pointer; break;
- case DImode: ftype = di_ftype_const_di_pointer; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = v1di_ftype_const_di_pointer; break;
+ /* LLVM LOCAL end */
case V16QImode: ftype = v16qi_ftype_const_qi_pointer; break;
case V8HImode: ftype = v8hi_ftype_const_hi_pointer; break;
case V4SImode: ftype = v4si_ftype_const_si_pointer; break;
@@ -18823,9 +18892,11 @@
case V2SFmode:
ftype = v2sf_ftype_const_sf_pointer_v2sf_si;
break;
- case DImode:
- ftype = di_ftype_const_di_pointer_di_si;
+ /* LLVM LOCAL begin */
+ case V1DImode:
+ ftype = v1di_ftype_const_di_pointer_v1di_si;
break;
+ /* LLVM LOCAL end */
case V16QImode:
ftype = v16qi_ftype_const_qi_pointer_v16qi_si;
break;
@@ -18853,7 +18924,9 @@
case V4HImode: ftype = void_ftype_hi_pointer_v4hi; break;
case V2SImode: ftype = void_ftype_si_pointer_v2si; break;
case V2SFmode: ftype = void_ftype_sf_pointer_v2sf; break;
- case DImode: ftype = void_ftype_di_pointer_di; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = void_ftype_di_pointer_v1di; break;
+ /* LLVM LOCAL end */
case V16QImode: ftype = void_ftype_qi_pointer_v16qi; break;
case V8HImode: ftype = void_ftype_hi_pointer_v8hi; break;
case V4SImode: ftype = void_ftype_si_pointer_v4si; break;
@@ -18870,7 +18943,9 @@
case V4HImode: ftype = void_ftype_hi_pointer_v4hi_si; break;
case V2SImode: ftype = void_ftype_si_pointer_v2si_si; break;
case V2SFmode: ftype = void_ftype_sf_pointer_v2sf_si; break;
- case DImode: ftype = void_ftype_di_pointer_di_si; break;
+ /* LLVM LOCAL begin */
+ case V1DImode: ftype = void_ftype_di_pointer_v1di_si; break;
+ /* LLVM LOCAL end */
case V16QImode: ftype = void_ftype_qi_pointer_v16qi_si; break;
case V8HImode: ftype = void_ftype_hi_pointer_v8hi_si; break;
case V4SImode: ftype = void_ftype_si_pointer_v4si_si; break;
@@ -18891,7 +18966,9 @@
case T_V4HI: ftype = ti_ftype_const_hi_pointer; break;
case T_V2SI: ftype = ti_ftype_const_si_pointer; break;
case T_V2SF: ftype = ti_ftype_const_sf_pointer; break;
- case T_DI: ftype = ti_ftype_const_di_pointer; break;
+ /* LLVM LOCAL begin */
+ case T_V1DI: ftype = ti_ftype_const_di_pointer; break;
+ /* LLVM LOCAL end */
default: gcc_unreachable ();
}
break;
@@ -18904,7 +18981,9 @@
case T_V4HI: ftype = ei_ftype_const_hi_pointer; break;
case T_V2SI: ftype = ei_ftype_const_si_pointer; break;
case T_V2SF: ftype = ei_ftype_const_sf_pointer; break;
- case T_DI: ftype = ei_ftype_const_di_pointer; break;
+ /* LLVM LOCAL begin */
+ case T_V1DI: ftype = ei_ftype_const_di_pointer; break;
+ /* LLVM LOCAL end */
default: gcc_unreachable ();
}
break;
@@ -18925,7 +19004,9 @@
case T_V2SI: ftype = oid_ftype_const_si_pointer; break;
case T_V2SF: ftype = oid_ftype_const_sf_pointer; break;
/* LLVM LOCAL end multi-vector types */
- case T_DI: ftype = oi_ftype_const_di_pointer; break;
+ /* LLVM LOCAL begin */
+ case T_V1DI: ftype = oi_ftype_const_di_pointer; break;
+ /* LLVM LOCAL end */
default: gcc_unreachable ();
}
break;
@@ -19091,7 +19172,9 @@
case T_V4HI: ftype = void_ftype_hi_pointer_ti; break;
case T_V2SI: ftype = void_ftype_si_pointer_ti; break;
case T_V2SF: ftype = void_ftype_sf_pointer_ti; break;
- case T_DI: ftype = void_ftype_di_pointer_ti; break;
+ /* LLVM LOCAL begin */
+ case T_V1DI: ftype = void_ftype_di_pointer_ti; break;
+ /* LLVM LOCAL end */
default: gcc_unreachable ();
}
break;
@@ -19104,7 +19187,9 @@
case T_V4HI: ftype = void_ftype_hi_pointer_ei; break;
case T_V2SI: ftype = void_ftype_si_pointer_ei; break;
case T_V2SF: ftype = void_ftype_sf_pointer_ei; break;
- case T_DI: ftype = void_ftype_di_pointer_ei; break;
+ /* LLVM LOCAL begin */
+ case T_V1DI: ftype = void_ftype_di_pointer_ei; break;
+ /* LLVM LOCAL end */
default: gcc_unreachable ();
}
break;
@@ -19125,7 +19210,9 @@
case T_V2SI: ftype = void_ftype_si_pointer_oid; break;
case T_V2SF: ftype = void_ftype_sf_pointer_oid; break;
/* LLVM LOCAL end multi-vector types */
- case T_DI: ftype = void_ftype_di_pointer_oi; break;
+ /* LLVM LOCAL begin */
+ case T_V1DI: ftype = void_ftype_di_pointer_oi; break;
+ /* LLVM LOCAL end */
default: gcc_unreachable ();
}
break;
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/arm.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/arm.h?rev=80638&r1=80637&r2=80638&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/arm.h (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/arm.h Mon Aug 31 18:16:02 2009
@@ -1138,9 +1138,11 @@
/* APPLE LOCAL begin v7 support. Merge from Codesourcery */
/* Modes valid for Neon D registers. */
+/* LLVM LOCAL begin */
#define VALID_NEON_DREG_MODE(MODE) \
((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode \
- || (MODE) == V2SFmode || (MODE) == DImode)
+ || (MODE) == V2SFmode || (MODE) == V1DImode)
+/* LLVM LOCAL end */
/* Modes valid for Neon Q registers. */
#define VALID_NEON_QREG_MODE(MODE) \
@@ -3365,7 +3367,9 @@
NEON_BUILTIN_vreinterpretv4hi,
NEON_BUILTIN_vreinterpretv2si,
NEON_BUILTIN_vreinterpretv2sf,
- NEON_BUILTIN_vreinterpretdi,
+ /* LLVM LOCAL begin */
+ NEON_BUILTIN_vreinterpretv1di,
+ /* LLVM LOCAL end */
NEON_BUILTIN_vreinterpretv16qi,
NEON_BUILTIN_vreinterpretv8hi,
NEON_BUILTIN_vreinterpretv4si,
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/arm_neon.h
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/arm_neon.h?rev=80638&r1=80637&r2=80638&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/arm_neon.h (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/arm_neon.h Mon Aug 31 18:16:02 2009
@@ -46,14 +46,14 @@
typedef __builtin_neon_qi int8x8_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_hi int16x4_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_si int32x2_t __attribute__ ((__vector_size__ (8)));
-typedef __builtin_neon_di int64x1_t;
+typedef __builtin_neon_di int64x1_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_sf float32x2_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_poly8 poly8x8_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_poly16 poly16x4_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_uqi uint8x8_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_uhi uint16x4_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_usi uint32x2_t __attribute__ ((__vector_size__ (8)));
-typedef __builtin_neon_udi uint64x1_t;
+typedef __builtin_neon_udi uint64x1_t __attribute__ ((__vector_size__ (8)));
typedef __builtin_neon_qi int8x16_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_hi int16x8_t __attribute__ ((__vector_size__ (16)));
typedef __builtin_neon_si int32x4_t __attribute__ ((__vector_size__ (16)));
@@ -411,7 +411,7 @@
(int32x2_t)__builtin_neon_vaddv2si (__a, __b, 1)
#define vadd_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vadddi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vaddv1di (__a, __b, 1)
#define vadd_f32(__a, __b) \
(float32x2_t)__builtin_neon_vaddv2sf (__a, __b, 5)
@@ -426,7 +426,7 @@
(uint32x2_t)__builtin_neon_vaddv2si (__a, __b, 0)
#define vadd_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vadddi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vaddv1di (__a, __b, 0)
#define vaddq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vaddv16qi (__a, __b, 1)
@@ -573,7 +573,7 @@
(int32x2_t)__builtin_neon_vqaddv2si (__a, __b, 1)
#define vqadd_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vqadddi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vqaddv1di (__a, __b, 1)
#define vqadd_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vqaddv8qi (__a, __b, 0)
@@ -585,7 +585,7 @@
(uint32x2_t)__builtin_neon_vqaddv2si (__a, __b, 0)
#define vqadd_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vqadddi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vqaddv1di (__a, __b, 0)
#define vqaddq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vqaddv16qi (__a, __b, 1)
@@ -888,7 +888,7 @@
(int32x2_t)__builtin_neon_vsubv2si (__a, __b, 1)
#define vsub_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vsubdi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vsubv1di (__a, __b, 1)
#define vsub_f32(__a, __b) \
(float32x2_t)__builtin_neon_vsubv2sf (__a, __b, 5)
@@ -903,7 +903,7 @@
(uint32x2_t)__builtin_neon_vsubv2si (__a, __b, 0)
#define vsub_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vsubdi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vsubv1di (__a, __b, 0)
#define vsubq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vsubv16qi (__a, __b, 1)
@@ -1014,7 +1014,7 @@
(int32x2_t)__builtin_neon_vqsubv2si (__a, __b, 1)
#define vqsub_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vqsubdi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vqsubv1di (__a, __b, 1)
#define vqsub_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vqsubv8qi (__a, __b, 0)
@@ -1026,7 +1026,7 @@
(uint32x2_t)__builtin_neon_vqsubv2si (__a, __b, 0)
#define vqsub_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vqsubdi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vqsubv1di (__a, __b, 0)
#define vqsubq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vqsubv16qi (__a, __b, 1)
@@ -1725,7 +1725,7 @@
(int32x2_t)__builtin_neon_vshlv2si (__a, __b, 1)
#define vshl_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vshldi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vshlv1di (__a, __b, 1)
#define vshl_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vshlv8qi (__a, __b, 0)
@@ -1737,7 +1737,7 @@
(uint32x2_t)__builtin_neon_vshlv2si (__a, __b, 0)
#define vshl_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vshldi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vshlv1di (__a, __b, 0)
#define vshlq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vshlv16qi (__a, __b, 1)
@@ -1773,7 +1773,7 @@
(int32x2_t)__builtin_neon_vshlv2si (__a, __b, 3)
#define vrshl_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vshldi (__a, __b, 3)
+ (int64x1_t)__builtin_neon_vshlv1di (__a, __b, 3)
#define vrshl_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vshlv8qi (__a, __b, 2)
@@ -1785,7 +1785,7 @@
(uint32x2_t)__builtin_neon_vshlv2si (__a, __b, 2)
#define vrshl_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vshldi (__a, __b, 2)
+ (uint64x1_t)__builtin_neon_vshlv1di (__a, __b, 2)
#define vrshlq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vshlv16qi (__a, __b, 3)
@@ -1821,7 +1821,7 @@
(int32x2_t)__builtin_neon_vqshlv2si (__a, __b, 1)
#define vqshl_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vqshldi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vqshlv1di (__a, __b, 1)
#define vqshl_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vqshlv8qi (__a, __b, 0)
@@ -1833,7 +1833,7 @@
(uint32x2_t)__builtin_neon_vqshlv2si (__a, __b, 0)
#define vqshl_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vqshldi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vqshlv1di (__a, __b, 0)
#define vqshlq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vqshlv16qi (__a, __b, 1)
@@ -1869,7 +1869,7 @@
(int32x2_t)__builtin_neon_vqshlv2si (__a, __b, 3)
#define vqrshl_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vqshldi (__a, __b, 3)
+ (int64x1_t)__builtin_neon_vqshlv1di (__a, __b, 3)
#define vqrshl_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vqshlv8qi (__a, __b, 2)
@@ -1881,7 +1881,7 @@
(uint32x2_t)__builtin_neon_vqshlv2si (__a, __b, 2)
#define vqrshl_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vqshldi (__a, __b, 2)
+ (uint64x1_t)__builtin_neon_vqshlv1di (__a, __b, 2)
#define vqrshlq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vqshlv16qi (__a, __b, 3)
@@ -1917,7 +1917,7 @@
(int32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 1)
#define vshr_n_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vshr_ndi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vshr_nv1di (__a, __b, 1)
#define vshr_n_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vshr_nv8qi (__a, __b, 0)
@@ -1929,7 +1929,7 @@
(uint32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 0)
#define vshr_n_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vshr_ndi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vshr_nv1di (__a, __b, 0)
#define vshrq_n_s8(__a, __b) \
(int8x16_t)__builtin_neon_vshr_nv16qi (__a, __b, 1)
@@ -1965,7 +1965,7 @@
(int32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 3)
#define vrshr_n_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vshr_ndi (__a, __b, 3)
+ (int64x1_t)__builtin_neon_vshr_nv1di (__a, __b, 3)
#define vrshr_n_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vshr_nv8qi (__a, __b, 2)
@@ -1977,7 +1977,7 @@
(uint32x2_t)__builtin_neon_vshr_nv2si (__a, __b, 2)
#define vrshr_n_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vshr_ndi (__a, __b, 2)
+ (uint64x1_t)__builtin_neon_vshr_nv1di (__a, __b, 2)
#define vrshrq_n_s8(__a, __b) \
(int8x16_t)__builtin_neon_vshr_nv16qi (__a, __b, 3)
@@ -2103,7 +2103,7 @@
(int32x2_t)__builtin_neon_vshl_nv2si (__a, __b, 1)
#define vshl_n_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vshl_ndi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vshl_nv1di (__a, __b, 1)
#define vshl_n_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vshl_nv8qi (__a, __b, 0)
@@ -2115,7 +2115,7 @@
(uint32x2_t)__builtin_neon_vshl_nv2si (__a, __b, 0)
#define vshl_n_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vshl_ndi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vshl_nv1di (__a, __b, 0)
#define vshlq_n_s8(__a, __b) \
(int8x16_t)__builtin_neon_vshl_nv16qi (__a, __b, 1)
@@ -2151,7 +2151,7 @@
(int32x2_t)__builtin_neon_vqshl_nv2si (__a, __b, 1)
#define vqshl_n_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vqshl_ndi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vqshl_nv1di (__a, __b, 1)
#define vqshl_n_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vqshl_nv8qi (__a, __b, 0)
@@ -2163,7 +2163,7 @@
(uint32x2_t)__builtin_neon_vqshl_nv2si (__a, __b, 0)
#define vqshl_n_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vqshl_ndi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vqshl_nv1di (__a, __b, 0)
#define vqshlq_n_s8(__a, __b) \
(int8x16_t)__builtin_neon_vqshl_nv16qi (__a, __b, 1)
@@ -2199,7 +2199,7 @@
(uint32x2_t)__builtin_neon_vqshlu_nv2si (__a, __b, 1)
#define vqshlu_n_s64(__a, __b) \
- (uint64x1_t)__builtin_neon_vqshlu_ndi (__a, __b, 1)
+ (uint64x1_t)__builtin_neon_vqshlu_nv1di (__a, __b, 1)
#define vqshluq_n_s8(__a, __b) \
(uint8x16_t)__builtin_neon_vqshlu_nv16qi (__a, __b, 1)
@@ -2241,7 +2241,7 @@
(int32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 1)
#define vsra_n_s64(__a, __b, __c) \
- (int64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 1)
+ (int64x1_t)__builtin_neon_vsra_nv1di (__a, __b, __c, 1)
#define vsra_n_u8(__a, __b, __c) \
(uint8x8_t)__builtin_neon_vsra_nv8qi (__a, __b, __c, 0)
@@ -2253,7 +2253,7 @@
(uint32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 0)
#define vsra_n_u64(__a, __b, __c) \
- (uint64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 0)
+ (uint64x1_t)__builtin_neon_vsra_nv1di (__a, __b, __c, 0)
#define vsraq_n_s8(__a, __b, __c) \
(int8x16_t)__builtin_neon_vsra_nv16qi (__a, __b, __c, 1)
@@ -2289,7 +2289,7 @@
(int32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 3)
#define vrsra_n_s64(__a, __b, __c) \
- (int64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 3)
+ (int64x1_t)__builtin_neon_vsra_nv1di (__a, __b, __c, 3)
#define vrsra_n_u8(__a, __b, __c) \
(uint8x8_t)__builtin_neon_vsra_nv8qi (__a, __b, __c, 2)
@@ -2301,7 +2301,7 @@
(uint32x2_t)__builtin_neon_vsra_nv2si (__a, __b, __c, 2)
#define vrsra_n_u64(__a, __b, __c) \
- (uint64x1_t)__builtin_neon_vsra_ndi (__a, __b, __c, 2)
+ (uint64x1_t)__builtin_neon_vsra_nv1di (__a, __b, __c, 2)
#define vrsraq_n_s8(__a, __b, __c) \
(int8x16_t)__builtin_neon_vsra_nv16qi (__a, __b, __c, 3)
@@ -2337,7 +2337,7 @@
(int32x2_t)__builtin_neon_vsri_nv2si (__a, __b, __c)
#define vsri_n_s64(__a, __b, __c) \
- (int64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c)
+ (int64x1_t)__builtin_neon_vsri_nv1di (__a, __b, __c)
#define vsri_n_u8(__a, __b, __c) \
(uint8x8_t)__builtin_neon_vsri_nv8qi (__a, __b, __c)
@@ -2349,7 +2349,7 @@
(uint32x2_t)__builtin_neon_vsri_nv2si (__a, __b, __c)
#define vsri_n_u64(__a, __b, __c) \
- (uint64x1_t)__builtin_neon_vsri_ndi (__a, __b, __c)
+ (uint64x1_t)__builtin_neon_vsri_nv1di (__a, __b, __c)
#define vsri_n_p8(__a, __b, __c) \
(poly8x8_t)__builtin_neon_vsri_nv8qi (__a, __b, __c)
@@ -2397,7 +2397,7 @@
(int32x2_t)__builtin_neon_vsli_nv2si (__a, __b, __c)
#define vsli_n_s64(__a, __b, __c) \
- (int64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c)
+ (int64x1_t)__builtin_neon_vsli_nv1di (__a, __b, __c)
#define vsli_n_u8(__a, __b, __c) \
(uint8x8_t)__builtin_neon_vsli_nv8qi (__a, __b, __c)
@@ -2409,7 +2409,7 @@
(uint32x2_t)__builtin_neon_vsli_nv2si (__a, __b, __c)
#define vsli_n_u64(__a, __b, __c) \
- (uint64x1_t)__builtin_neon_vsli_ndi (__a, __b, __c)
+ (uint64x1_t)__builtin_neon_vsli_nv1di (__a, __b, __c)
#define vsli_n_p8(__a, __b, __c) \
(poly8x8_t)__builtin_neon_vsli_nv8qi (__a, __b, __c)
@@ -2697,10 +2697,10 @@
(poly16_t)__builtin_neon_vget_lanev4hi (__a, __b, 4)
#define vget_lane_s64(__a, __b) \
- (int64_t)__builtin_neon_vget_lanedi (__a, __b, 1)
+ (int64_t)__builtin_neon_vget_lanev1di (__a, __b, 1)
#define vget_lane_u64(__a, __b) \
- (uint64_t)__builtin_neon_vget_lanedi (__a, __b, 0)
+ (uint64_t)__builtin_neon_vget_lanev1di (__a, __b, 0)
#define vgetq_lane_s8(__a, __b) \
(int8_t)__builtin_neon_vget_lanev16qi (__a, __b, 1)
@@ -2763,10 +2763,10 @@
(poly16x4_t)__builtin_neon_vset_lanev4hi (__a, __b, __c)
#define vset_lane_s64(__a, __b, __c) \
- (int64x1_t)__builtin_neon_vset_lanedi (__a, __b, __c)
+ (int64x1_t)__builtin_neon_vset_lanev1di (__a, __b, __c)
#define vset_lane_u64(__a, __b, __c) \
- (uint64x1_t)__builtin_neon_vset_lanedi (__a, __b, __c)
+ (uint64x1_t)__builtin_neon_vset_lanev1di (__a, __b, __c)
#define vsetq_lane_s8(__a, __b, __c) \
(int8x16_t)__builtin_neon_vset_lanev16qi (__a, __b, __c)
@@ -2811,7 +2811,7 @@
(int32x2_t)__builtin_neon_vcreatev2si (__a)
#define vcreate_s64(__a) \
- (int64x1_t)__builtin_neon_vcreatedi (__a)
+ (int64x1_t)__builtin_neon_vcreatev1di (__a)
#define vcreate_f32(__a) \
(float32x2_t)__builtin_neon_vcreatev2sf (__a)
@@ -2826,7 +2826,7 @@
(uint32x2_t)__builtin_neon_vcreatev2si (__a)
#define vcreate_u64(__a) \
- (uint64x1_t)__builtin_neon_vcreatedi (__a)
+ (uint64x1_t)__builtin_neon_vcreatev1di (__a)
#define vcreate_p8(__a) \
(poly8x8_t)__builtin_neon_vcreatev8qi (__a)
@@ -2862,10 +2862,10 @@
(poly16x4_t)__builtin_neon_vdup_nv4hi (__a)
#define vdup_n_s64(__a) \
- (int64x1_t)__builtin_neon_vdup_ndi (__a)
+ (int64x1_t)__builtin_neon_vdup_nv1di (__a)
#define vdup_n_u64(__a) \
- (uint64x1_t)__builtin_neon_vdup_ndi (__a)
+ (uint64x1_t)__builtin_neon_vdup_nv1di (__a)
#define vdupq_n_s8(__a) \
(int8x16_t)__builtin_neon_vdup_nv16qi (__a)
@@ -2928,10 +2928,10 @@
(poly16x4_t)__builtin_neon_vdup_nv4hi (__a)
#define vmov_n_s64(__a) \
- (int64x1_t)__builtin_neon_vdup_ndi (__a)
+ (int64x1_t)__builtin_neon_vdup_nv1di (__a)
#define vmov_n_u64(__a) \
- (uint64x1_t)__builtin_neon_vdup_ndi (__a)
+ (uint64x1_t)__builtin_neon_vdup_nv1di (__a)
#define vmovq_n_s8(__a) \
(int8x16_t)__builtin_neon_vdup_nv16qi (__a)
@@ -2994,10 +2994,10 @@
(poly16x4_t)__builtin_neon_vdup_lanev4hi (__a, __b)
#define vdup_lane_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vdup_lanedi (__a, __b)
+ (int64x1_t)__builtin_neon_vdup_lanev1di (__a, __b)
#define vdup_lane_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vdup_lanedi (__a, __b)
+ (uint64x1_t)__builtin_neon_vdup_lanev1di (__a, __b)
#define vdupq_lane_s8(__a, __b) \
(int8x16_t)__builtin_neon_vdup_lanev16qi (__a, __b)
@@ -3042,7 +3042,7 @@
(int32x4_t)__builtin_neon_vcombinev2si (__a, __b)
#define vcombine_s64(__a, __b) \
- (int64x2_t)__builtin_neon_vcombinedi (__a, __b)
+ (int64x2_t)__builtin_neon_vcombinev1di (__a, __b)
#define vcombine_f32(__a, __b) \
(float32x4_t)__builtin_neon_vcombinev2sf (__a, __b)
@@ -3057,7 +3057,7 @@
(uint32x4_t)__builtin_neon_vcombinev2si (__a, __b)
#define vcombine_u64(__a, __b) \
- (uint64x2_t)__builtin_neon_vcombinedi (__a, __b)
+ (uint64x2_t)__builtin_neon_vcombinev1di (__a, __b)
#define vcombine_p8(__a, __b) \
(poly8x16_t)__builtin_neon_vcombinev8qi (__a, __b)
@@ -3714,7 +3714,7 @@
(int32x2_t)__builtin_neon_vextv2si (__a, __b, __c)
#define vext_s64(__a, __b, __c) \
- (int64x1_t)__builtin_neon_vextdi (__a, __b, __c)
+ (int64x1_t)__builtin_neon_vextv1di (__a, __b, __c)
#define vext_f32(__a, __b, __c) \
(float32x2_t)__builtin_neon_vextv2sf (__a, __b, __c)
@@ -3729,7 +3729,7 @@
(uint32x2_t)__builtin_neon_vextv2si (__a, __b, __c)
#define vext_u64(__a, __b, __c) \
- (uint64x1_t)__builtin_neon_vextdi (__a, __b, __c)
+ (uint64x1_t)__builtin_neon_vextv1di (__a, __b, __c)
#define vext_p8(__a, __b, __c) \
(poly8x8_t)__builtin_neon_vextv8qi (__a, __b, __c)
@@ -3888,7 +3888,7 @@
(int32x2_t)__builtin_neon_vbslv2si (__a, __b, __c)
#define vbsl_s64(__a, __b, __c) \
- (int64x1_t)__builtin_neon_vbsldi (__a, __b, __c)
+ (int64x1_t)__builtin_neon_vbslv1di (__a, __b, __c)
#define vbsl_f32(__a, __b, __c) \
(float32x2_t)__builtin_neon_vbslv2sf (__a, __b, __c)
@@ -3903,7 +3903,7 @@
(uint32x2_t)__builtin_neon_vbslv2si (__a, __b, __c)
#define vbsl_u64(__a, __b, __c) \
- (uint64x1_t)__builtin_neon_vbsldi (__a, __b, __c)
+ (uint64x1_t)__builtin_neon_vbslv1di (__a, __b, __c)
#define vbsl_p8(__a, __b, __c) \
(poly8x8_t)__builtin_neon_vbslv8qi (__a, __b, __c)
@@ -4332,7 +4332,7 @@
(int32x2_t)__builtin_neon_vld1v2si (__a)
#define vld1_s64(__a) \
- (int64x1_t)__builtin_neon_vld1di (__a)
+ (int64x1_t)__builtin_neon_vld1v1di (__a)
#define vld1_f32(__a) \
(float32x2_t)__builtin_neon_vld1v2sf (__a)
@@ -4347,7 +4347,7 @@
(uint32x2_t)__builtin_neon_vld1v2si (__a)
#define vld1_u64(__a) \
- (uint64x1_t)__builtin_neon_vld1di (__a)
+ (uint64x1_t)__builtin_neon_vld1v1di (__a)
#define vld1_p8(__a) \
(poly8x8_t)__builtin_neon_vld1v8qi (__a)
@@ -4416,10 +4416,10 @@
(poly16x4_t)__builtin_neon_vld1_lanev4hi (__a, __b, __c)
#define vld1_lane_s64(__a, __b, __c) \
- (int64x1_t)__builtin_neon_vld1_lanedi (__a, __b, __c)
+ (int64x1_t)__builtin_neon_vld1_lanev1di (__a, __b, __c)
#define vld1_lane_u64(__a, __b, __c) \
- (uint64x1_t)__builtin_neon_vld1_lanedi (__a, __b, __c)
+ (uint64x1_t)__builtin_neon_vld1_lanev1di (__a, __b, __c)
#define vld1q_lane_s8(__a, __b, __c) \
(int8x16_t)__builtin_neon_vld1_lanev16qi (__a, __b, __c)
@@ -4482,10 +4482,10 @@
(poly16x4_t)__builtin_neon_vld1_dupv4hi (__a)
#define vld1_dup_s64(__a) \
- (int64x1_t)__builtin_neon_vld1_dupdi (__a)
+ (int64x1_t)__builtin_neon_vld1_dupv1di (__a)
#define vld1_dup_u64(__a) \
- (uint64x1_t)__builtin_neon_vld1_dupdi (__a)
+ (uint64x1_t)__builtin_neon_vld1_dupv1di (__a)
#define vld1q_dup_s8(__a) \
(int8x16_t)__builtin_neon_vld1_dupv16qi (__a)
@@ -4530,7 +4530,7 @@
__builtin_neon_vst1v2si (__a, __b)
#define vst1_s64(__a, __b) \
- __builtin_neon_vst1di (__a, __b)
+ __builtin_neon_vst1v1di (__a, __b)
#define vst1_f32(__a, __b) \
__builtin_neon_vst1v2sf (__a, __b)
@@ -4545,7 +4545,7 @@
__builtin_neon_vst1v2si (__a, __b)
#define vst1_u64(__a, __b) \
- __builtin_neon_vst1di (__a, __b)
+ __builtin_neon_vst1v1di (__a, __b)
#define vst1_p8(__a, __b) \
__builtin_neon_vst1v8qi (__a, __b)
@@ -4614,10 +4614,10 @@
__builtin_neon_vst1_lanev4hi (__a, __b, __c)
#define vst1_lane_s64(__a, __b, __c) \
- __builtin_neon_vst1_lanedi (__a, __b, __c)
+ __builtin_neon_vst1_lanev1di (__a, __b, __c)
#define vst1_lane_u64(__a, __b, __c) \
- __builtin_neon_vst1_lanedi (__a, __b, __c)
+ __builtin_neon_vst1_lanev1di (__a, __b, __c)
#define vst1q_lane_s8(__a, __b, __c) \
__builtin_neon_vst1_lanev16qi (__a, __b, __c)
@@ -4718,14 +4718,14 @@
#define vld2_s64(__a) \
({ \
union { int64x1x2_t __i; __builtin_neon_di2 __o; } __rv; \
- __rv.__o = __builtin_neon_vld2di (__a); \
+ __rv.__o = __builtin_neon_vld2v1di (__a); \
__rv.__i; \
})
#define vld2_u64(__a) \
({ \
union { uint64x1x2_t __i; __builtin_neon_di2 __o; } __rv; \
- __rv.__o = __builtin_neon_vld2di (__a); \
+ __rv.__o = __builtin_neon_vld2v1di (__a); \
__rv.__i; \
})
@@ -4978,14 +4978,14 @@
#define vld2_dup_s64(__a) \
({ \
union { int64x1x2_t __i; __builtin_neon_di2 __o; } __rv; \
- __rv.__o = __builtin_neon_vld2_dupdi (__a); \
+ __rv.__o = __builtin_neon_vld2_dupv1di (__a); \
__rv.__i; \
})
#define vld2_dup_u64(__a) \
({ \
union { uint64x1x2_t __i; __builtin_neon_di2 __o; } __rv; \
- __rv.__o = __builtin_neon_vld2_dupdi (__a); \
+ __rv.__o = __builtin_neon_vld2_dupv1di (__a); \
__rv.__i; \
})
@@ -5046,13 +5046,13 @@
#define vst2_s64(__a, __b) \
({ \
union { int64x1x2_t __i; __builtin_neon_di2 __o; } __bu = { __b }; \
- __builtin_neon_vst2di (__a, __bu.__o); \
+ __builtin_neon_vst2v1di (__a, __bu.__o); \
})
#define vst2_u64(__a, __b) \
({ \
union { uint64x1x2_t __i; __builtin_neon_di2 __o; } __bu = { __b }; \
- __builtin_neon_vst2di (__a, __bu.__o); \
+ __builtin_neon_vst2v1di (__a, __bu.__o); \
})
#define vst2q_s8(__a, __b) \
@@ -5265,14 +5265,14 @@
#define vld3_s64(__a) \
({ \
union { int64x1x3_t __i; __builtin_neon_di3 __o; } __rv; \
- __rv.__o = __builtin_neon_vld3di (__a); \
+ __rv.__o = __builtin_neon_vld3v1di (__a); \
__rv.__i; \
})
#define vld3_u64(__a) \
({ \
union { uint64x1x3_t __i; __builtin_neon_di3 __o; } __rv; \
- __rv.__o = __builtin_neon_vld3di (__a); \
+ __rv.__o = __builtin_neon_vld3v1di (__a); \
__rv.__i; \
})
@@ -5525,14 +5525,14 @@
#define vld3_dup_s64(__a) \
({ \
union { int64x1x3_t __i; __builtin_neon_di3 __o; } __rv; \
- __rv.__o = __builtin_neon_vld3_dupdi (__a); \
+ __rv.__o = __builtin_neon_vld3_dupv1di (__a); \
__rv.__i; \
})
#define vld3_dup_u64(__a) \
({ \
union { uint64x1x3_t __i; __builtin_neon_di3 __o; } __rv; \
- __rv.__o = __builtin_neon_vld3_dupdi (__a); \
+ __rv.__o = __builtin_neon_vld3_dupv1di (__a); \
__rv.__i; \
})
@@ -5593,13 +5593,13 @@
#define vst3_s64(__a, __b) \
({ \
union { int64x1x3_t __i; __builtin_neon_di3 __o; } __bu = { __b }; \
- __builtin_neon_vst3di (__a, __bu.__o); \
+ __builtin_neon_vst3v1di (__a, __bu.__o); \
})
#define vst3_u64(__a, __b) \
({ \
union { uint64x1x3_t __i; __builtin_neon_di3 __o; } __bu = { __b }; \
- __builtin_neon_vst3di (__a, __bu.__o); \
+ __builtin_neon_vst3v1di (__a, __bu.__o); \
})
#define vst3q_s8(__a, __b) \
@@ -5812,14 +5812,14 @@
#define vld4_s64(__a) \
({ \
union { int64x1x4_t __i; __builtin_neon_di4 __o; } __rv; \
- __rv.__o = __builtin_neon_vld4di (__a); \
+ __rv.__o = __builtin_neon_vld4v1di (__a); \
__rv.__i; \
})
#define vld4_u64(__a) \
({ \
union { uint64x1x4_t __i; __builtin_neon_di4 __o; } __rv; \
- __rv.__o = __builtin_neon_vld4di (__a); \
+ __rv.__o = __builtin_neon_vld4v1di (__a); \
__rv.__i; \
})
@@ -6072,14 +6072,14 @@
#define vld4_dup_s64(__a) \
({ \
union { int64x1x4_t __i; __builtin_neon_di4 __o; } __rv; \
- __rv.__o = __builtin_neon_vld4_dupdi (__a); \
+ __rv.__o = __builtin_neon_vld4_dupv1di (__a); \
__rv.__i; \
})
#define vld4_dup_u64(__a) \
({ \
union { uint64x1x4_t __i; __builtin_neon_di4 __o; } __rv; \
- __rv.__o = __builtin_neon_vld4_dupdi (__a); \
+ __rv.__o = __builtin_neon_vld4_dupv1di (__a); \
__rv.__i; \
})
@@ -6140,13 +6140,13 @@
#define vst4_s64(__a, __b) \
({ \
union { int64x1x4_t __i; __builtin_neon_di4 __o; } __bu = { __b }; \
- __builtin_neon_vst4di (__a, __bu.__o); \
+ __builtin_neon_vst4v1di (__a, __bu.__o); \
})
#define vst4_u64(__a, __b) \
({ \
union { uint64x1x4_t __i; __builtin_neon_di4 __o; } __bu = { __b }; \
- __builtin_neon_vst4di (__a, __bu.__o); \
+ __builtin_neon_vst4v1di (__a, __bu.__o); \
})
#define vst4q_s8(__a, __b) \
@@ -6303,7 +6303,7 @@
(int32x2_t)__builtin_neon_vandv2si (__a, __b, 1)
#define vand_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vanddi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vandv1di (__a, __b, 1)
#define vand_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vandv8qi (__a, __b, 0)
@@ -6315,7 +6315,7 @@
(uint32x2_t)__builtin_neon_vandv2si (__a, __b, 0)
#define vand_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vanddi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vandv1di (__a, __b, 0)
#define vandq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vandv16qi (__a, __b, 1)
@@ -6351,7 +6351,7 @@
(int32x2_t)__builtin_neon_vorrv2si (__a, __b, 1)
#define vorr_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vorrdi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vorrv1di (__a, __b, 1)
#define vorr_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vorrv8qi (__a, __b, 0)
@@ -6363,7 +6363,7 @@
(uint32x2_t)__builtin_neon_vorrv2si (__a, __b, 0)
#define vorr_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vorrdi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vorrv1di (__a, __b, 0)
#define vorrq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vorrv16qi (__a, __b, 1)
@@ -6399,7 +6399,7 @@
(int32x2_t)__builtin_neon_veorv2si (__a, __b, 1)
#define veor_s64(__a, __b) \
- (int64x1_t)__builtin_neon_veordi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_veorv1di (__a, __b, 1)
#define veor_u8(__a, __b) \
(uint8x8_t)__builtin_neon_veorv8qi (__a, __b, 0)
@@ -6411,7 +6411,7 @@
(uint32x2_t)__builtin_neon_veorv2si (__a, __b, 0)
#define veor_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_veordi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_veorv1di (__a, __b, 0)
#define veorq_s8(__a, __b) \
(int8x16_t)__builtin_neon_veorv16qi (__a, __b, 1)
@@ -6447,7 +6447,7 @@
(int32x2_t)__builtin_neon_vbicv2si (__a, __b, 1)
#define vbic_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vbicdi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vbicv1di (__a, __b, 1)
#define vbic_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vbicv8qi (__a, __b, 0)
@@ -6459,7 +6459,7 @@
(uint32x2_t)__builtin_neon_vbicv2si (__a, __b, 0)
#define vbic_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vbicdi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vbicv1di (__a, __b, 0)
#define vbicq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vbicv16qi (__a, __b, 1)
@@ -6495,7 +6495,7 @@
(int32x2_t)__builtin_neon_vornv2si (__a, __b, 1)
#define vorn_s64(__a, __b) \
- (int64x1_t)__builtin_neon_vorndi (__a, __b, 1)
+ (int64x1_t)__builtin_neon_vornv1di (__a, __b, 1)
#define vorn_u8(__a, __b) \
(uint8x8_t)__builtin_neon_vornv8qi (__a, __b, 0)
@@ -6507,7 +6507,7 @@
(uint32x2_t)__builtin_neon_vornv2si (__a, __b, 0)
#define vorn_u64(__a, __b) \
- (uint64x1_t)__builtin_neon_vorndi (__a, __b, 0)
+ (uint64x1_t)__builtin_neon_vornv1di (__a, __b, 0)
#define vornq_s8(__a, __b) \
(int8x16_t)__builtin_neon_vornv16qi (__a, __b, 1)
@@ -6544,7 +6544,7 @@
(poly8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a)
#define vreinterpret_p8_s64(__a) \
- (poly8x8_t)__builtin_neon_vreinterpretv8qidi (__a)
+ (poly8x8_t)__builtin_neon_vreinterpretv8qiv1di (__a)
#define vreinterpret_p8_f32(__a) \
(poly8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a)
@@ -6559,7 +6559,7 @@
(poly8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a)
#define vreinterpret_p8_u64(__a) \
- (poly8x8_t)__builtin_neon_vreinterpretv8qidi (__a)
+ (poly8x8_t)__builtin_neon_vreinterpretv8qiv1di (__a)
#define vreinterpret_p8_p16(__a) \
(poly8x8_t)__builtin_neon_vreinterpretv8qiv4hi (__a)
@@ -6604,7 +6604,7 @@
(poly16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a)
#define vreinterpret_p16_s64(__a) \
- (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a)
+ (poly16x4_t)__builtin_neon_vreinterpretv4hiv1di (__a)
#define vreinterpret_p16_f32(__a) \
(poly16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a)
@@ -6619,7 +6619,7 @@
(poly16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a)
#define vreinterpret_p16_u64(__a) \
- (poly16x4_t)__builtin_neon_vreinterpretv4hidi (__a)
+ (poly16x4_t)__builtin_neon_vreinterpretv4hiv1di (__a)
#define vreinterpret_p16_p8(__a) \
(poly16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a)
@@ -6664,7 +6664,7 @@
(float32x2_t)__builtin_neon_vreinterpretv2sfv2si (__a)
#define vreinterpret_f32_s64(__a) \
- (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a)
+ (float32x2_t)__builtin_neon_vreinterpretv2sfv1di (__a)
#define vreinterpret_f32_u8(__a) \
(float32x2_t)__builtin_neon_vreinterpretv2sfv8qi (__a)
@@ -6676,7 +6676,7 @@
(float32x2_t)__builtin_neon_vreinterpretv2sfv2si (__a)
#define vreinterpret_f32_u64(__a) \
- (float32x2_t)__builtin_neon_vreinterpretv2sfdi (__a)
+ (float32x2_t)__builtin_neon_vreinterpretv2sfv1di (__a)
#define vreinterpret_f32_p8(__a) \
(float32x2_t)__builtin_neon_vreinterpretv2sfv8qi (__a)
@@ -6715,34 +6715,34 @@
(float32x4_t)__builtin_neon_vreinterpretv4sfv8hi (__a)
#define vreinterpret_s64_s8(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdiv8qi (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div8qi (__a)
#define vreinterpret_s64_s16(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdiv4hi (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div4hi (__a)
#define vreinterpret_s64_s32(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdiv2si (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div2si (__a)
#define vreinterpret_s64_f32(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdiv2sf (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div2sf (__a)
#define vreinterpret_s64_u8(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdiv8qi (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div8qi (__a)
#define vreinterpret_s64_u16(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdiv4hi (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div4hi (__a)
#define vreinterpret_s64_u32(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdiv2si (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div2si (__a)
#define vreinterpret_s64_u64(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdidi (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div1di (__a)
#define vreinterpret_s64_p8(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdiv8qi (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div8qi (__a)
#define vreinterpret_s64_p16(__a) \
- (int64x1_t)__builtin_neon_vreinterpretdiv4hi (__a)
+ (int64x1_t)__builtin_neon_vreinterpretv1div4hi (__a)
#define vreinterpretq_s64_s8(__a) \
(int64x2_t)__builtin_neon_vreinterpretv2div16qi (__a)
@@ -6775,34 +6775,34 @@
(int64x2_t)__builtin_neon_vreinterpretv2div8hi (__a)
#define vreinterpret_u64_s8(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div8qi (__a)
#define vreinterpret_u64_s16(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div4hi (__a)
#define vreinterpret_u64_s32(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdiv2si (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div2si (__a)
#define vreinterpret_u64_s64(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdidi (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div1di (__a)
#define vreinterpret_u64_f32(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdiv2sf (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div2sf (__a)
#define vreinterpret_u64_u8(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div8qi (__a)
#define vreinterpret_u64_u16(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div4hi (__a)
#define vreinterpret_u64_u32(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdiv2si (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div2si (__a)
#define vreinterpret_u64_p8(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdiv8qi (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div8qi (__a)
#define vreinterpret_u64_p16(__a) \
- (uint64x1_t)__builtin_neon_vreinterpretdiv4hi (__a)
+ (uint64x1_t)__builtin_neon_vreinterpretv1div4hi (__a)
#define vreinterpretq_u64_s8(__a) \
(uint64x2_t)__builtin_neon_vreinterpretv2div16qi (__a)
@@ -6841,7 +6841,7 @@
(int8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a)
#define vreinterpret_s8_s64(__a) \
- (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a)
+ (int8x8_t)__builtin_neon_vreinterpretv8qiv1di (__a)
#define vreinterpret_s8_f32(__a) \
(int8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a)
@@ -6856,7 +6856,7 @@
(int8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a)
#define vreinterpret_s8_u64(__a) \
- (int8x8_t)__builtin_neon_vreinterpretv8qidi (__a)
+ (int8x8_t)__builtin_neon_vreinterpretv8qiv1di (__a)
#define vreinterpret_s8_p8(__a) \
(int8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a)
@@ -6901,7 +6901,7 @@
(int16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a)
#define vreinterpret_s16_s64(__a) \
- (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a)
+ (int16x4_t)__builtin_neon_vreinterpretv4hiv1di (__a)
#define vreinterpret_s16_f32(__a) \
(int16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a)
@@ -6916,7 +6916,7 @@
(int16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a)
#define vreinterpret_s16_u64(__a) \
- (int16x4_t)__builtin_neon_vreinterpretv4hidi (__a)
+ (int16x4_t)__builtin_neon_vreinterpretv4hiv1di (__a)
#define vreinterpret_s16_p8(__a) \
(int16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a)
@@ -6961,7 +6961,7 @@
(int32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a)
#define vreinterpret_s32_s64(__a) \
- (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a)
+ (int32x2_t)__builtin_neon_vreinterpretv2siv1di (__a)
#define vreinterpret_s32_f32(__a) \
(int32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a)
@@ -6976,7 +6976,7 @@
(int32x2_t)__builtin_neon_vreinterpretv2siv2si (__a)
#define vreinterpret_s32_u64(__a) \
- (int32x2_t)__builtin_neon_vreinterpretv2sidi (__a)
+ (int32x2_t)__builtin_neon_vreinterpretv2siv1di (__a)
#define vreinterpret_s32_p8(__a) \
(int32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a)
@@ -7024,7 +7024,7 @@
(uint8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a)
#define vreinterpret_u8_s64(__a) \
- (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a)
+ (uint8x8_t)__builtin_neon_vreinterpretv8qiv1di (__a)
#define vreinterpret_u8_f32(__a) \
(uint8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a)
@@ -7036,7 +7036,7 @@
(uint8x8_t)__builtin_neon_vreinterpretv8qiv2si (__a)
#define vreinterpret_u8_u64(__a) \
- (uint8x8_t)__builtin_neon_vreinterpretv8qidi (__a)
+ (uint8x8_t)__builtin_neon_vreinterpretv8qiv1di (__a)
#define vreinterpret_u8_p8(__a) \
(uint8x8_t)__builtin_neon_vreinterpretv8qiv8qi (__a)
@@ -7084,7 +7084,7 @@
(uint16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a)
#define vreinterpret_u16_s64(__a) \
- (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a)
+ (uint16x4_t)__builtin_neon_vreinterpretv4hiv1di (__a)
#define vreinterpret_u16_f32(__a) \
(uint16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a)
@@ -7096,7 +7096,7 @@
(uint16x4_t)__builtin_neon_vreinterpretv4hiv2si (__a)
#define vreinterpret_u16_u64(__a) \
- (uint16x4_t)__builtin_neon_vreinterpretv4hidi (__a)
+ (uint16x4_t)__builtin_neon_vreinterpretv4hiv1di (__a)
#define vreinterpret_u16_p8(__a) \
(uint16x4_t)__builtin_neon_vreinterpretv4hiv8qi (__a)
@@ -7144,7 +7144,7 @@
(uint32x2_t)__builtin_neon_vreinterpretv2siv2si (__a)
#define vreinterpret_u32_s64(__a) \
- (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a)
+ (uint32x2_t)__builtin_neon_vreinterpretv2siv1di (__a)
#define vreinterpret_u32_f32(__a) \
(uint32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a)
@@ -7156,7 +7156,7 @@
(uint32x2_t)__builtin_neon_vreinterpretv2siv4hi (__a)
#define vreinterpret_u32_u64(__a) \
- (uint32x2_t)__builtin_neon_vreinterpretv2sidi (__a)
+ (uint32x2_t)__builtin_neon_vreinterpretv2siv1di (__a)
#define vreinterpret_u32_p8(__a) \
(uint32x2_t)__builtin_neon_vreinterpretv2siv8qi (__a)
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp?rev=80638&r1=80637&r2=80638&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/llvm-arm.cpp Mon Aug 31 18:16:02 2009
@@ -423,7 +423,7 @@
case NEON_BUILTIN_vreinterpretv4hi:
case NEON_BUILTIN_vreinterpretv2si:
case NEON_BUILTIN_vreinterpretv2sf:
- case NEON_BUILTIN_vreinterpretdi:
+ case NEON_BUILTIN_vreinterpretv1di:
case NEON_BUILTIN_vreinterpretv16qi:
case NEON_BUILTIN_vreinterpretv8hi:
case NEON_BUILTIN_vreinterpretv4si:
@@ -669,7 +669,7 @@
case NEON_BUILTIN_vreinterpretv4hi:
case NEON_BUILTIN_vreinterpretv2si:
case NEON_BUILTIN_vreinterpretv2sf:
- case NEON_BUILTIN_vreinterpretdi:
+ case NEON_BUILTIN_vreinterpretv1di:
allow_128bit_modes = false;
allow_64bit_elements = true;
break;
@@ -715,7 +715,7 @@
if (modeCheckOpnd >= 0) {
switch (insn_data[icode].operand[modeCheckOpnd].mode) {
- case V8QImode: case V4HImode: case V2SImode: case DImode: case V2SFmode:
+ case V8QImode: case V4HImode: case V2SImode: case V1DImode: case V2SFmode:
if (!allow_64bit_modes)
return BadModeError(exp, Result);
break;
@@ -765,7 +765,7 @@
if (!allow_32bit_elements)
return BadModeError(exp, Result);
break;
- case DImode: case V2DImode:
+ case V1DImode: case V2DImode:
if (!allow_64bit_elements)
return BadModeError(exp, Result);
break;
@@ -1992,7 +1992,7 @@
case NEON_BUILTIN_vreinterpretv4hi:
case NEON_BUILTIN_vreinterpretv2si:
case NEON_BUILTIN_vreinterpretv2sf:
- case NEON_BUILTIN_vreinterpretdi:
+ case NEON_BUILTIN_vreinterpretv1di:
case NEON_BUILTIN_vreinterpretv16qi:
case NEON_BUILTIN_vreinterpretv8hi:
case NEON_BUILTIN_vreinterpretv4si:
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/neon-gen.ml
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/neon-gen.ml?rev=80638&r1=80637&r2=80638&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/neon-gen.ml (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/neon-gen.ml Mon Aug 31 18:16:02 2009
@@ -345,8 +345,9 @@
(fun (cbase, abase, esize, enum) ->
let attr =
match enum with
- 1 -> ""
- | _ -> Printf.sprintf "\t__attribute__ ((__vector_size__ (%d)))"
+(* LLVM LOCAL begin *)
+(* LLVM LOCAL end *)
+ _ -> Printf.sprintf "\t__attribute__ ((__vector_size__ (%d)))"
(esize * enum / 8) in
Format.printf "typedef %s %s%dx%d_t%s;@\n" cbase abase esize enum attr)
typeinfo;
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/neon.md
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/neon.md?rev=80638&r1=80637&r2=80638&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/neon.md (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/neon.md Mon Aug 31 18:16:02 2009
@@ -168,7 +168,8 @@
(define_mode_macro VD [V8QI V4HI V2SI V2SF])
;; Double-width vector modes plus 64-bit elements.
-(define_mode_macro VDX [V8QI V4HI V2SI V2SF DI])
+;; LLVM LOCAL
+(define_mode_macro VDX [V8QI V4HI V2SI V2SF V1DI])
;; Same, without floating-point elements.
(define_mode_macro VDI [V8QI V4HI V2SI])
@@ -203,8 +204,9 @@
;; Narrowable modes.
(define_mode_macro VN [V8HI V4SI V2DI])
-;; All supported vector modes (except singleton DImode).
-(define_mode_macro VDQ [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DI])
+;; All supported vector modes.
+;; LLVM LOCAL
+(define_mode_macro VDQ [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V2DI V1DI])
;; All supported vector modes (except those with 64-bit integer elements).
(define_mode_macro VDQW [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF])
@@ -212,14 +214,17 @@
;; Supported integer vector modes (not 64 bit elements).
(define_mode_macro VDQIW [V8QI V16QI V4HI V8HI V2SI V4SI])
-;; Supported integer vector modes (not singleton DI)
-(define_mode_macro VDQI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI])
+;; Supported integer vector modes
+;; LLVM LOCAL
+(define_mode_macro VDQI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI V1DI])
;; Vector modes, including 64-bit integer elements.
-(define_mode_macro VDQX [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF DI V2DI])
+;; LLVM LOCAL
+(define_mode_macro VDQX [V8QI V16QI V4HI V8HI V2SI V4SI V2SF V4SF V1DI V2DI])
;; Vector modes including 64-bit integer elements, but no floats.
-(define_mode_macro VDQIX [V8QI V16QI V4HI V8HI V2SI V4SI DI V2DI])
+;; LLVM LOCAL
+(define_mode_macro VDQIX [V8QI V16QI V4HI V8HI V2SI V4SI V1DI V2DI])
;; Vector modes for float->int conversions.
(define_mode_macro VCVTF [V2SF V4SF])
@@ -252,7 +257,8 @@
(define_mode_macro VE [V8QI V16QI])
;; Modes with 64-bit elements only.
-(define_mode_macro V64 [DI V2DI])
+;; LLVM LOCAL
+(define_mode_macro V64 [V1DI V2DI])
;; Modes with 32-bit elements only.
(define_mode_macro V32 [V2SI V2SF V4SI V4SF])
@@ -266,7 +272,8 @@
(V4HI "HI") (V8HI "HI")
(V2SI "SI") (V4SI "SI")
(V2SF "SF") (V4SF "SF")
- (DI "DI") (V2DI "DI")])
+;; LLVM LOCAL
+ (V1DI "DI") (V2DI "DI")])
;; Mode of pair of elements for each vector mode, to define transfer
;; size for structure lane/dup loads and stores.
@@ -274,7 +281,8 @@
(V4HI "SI") (V8HI "SI")
(V2SI "V2SI") (V4SI "V2SI")
(V2SF "V2SF") (V4SF "V2SF")
- (DI "V2DI") (V2DI "V2DI")])
+;; LLVM LOCAL
+ (V1DI "V2DI") (V2DI "V2DI")])
;; Similar, for three elements.
;; ??? Should we define extra modes so that sizes of all three-element
@@ -283,21 +291,24 @@
(V4HI "V4HI") (V8HI "V4HI")
(V2SI "V4SI") (V4SI "V4SI")
(V2SF "V4SF") (V4SF "V4SF")
- (DI "EI") (V2DI "EI")])
+;; LLVM LOCAL
+ (V1DI "EI") (V2DI "EI")])
;; Similar, for four elements.
(define_mode_attr V_four_elem [(V8QI "SI") (V16QI "SI")
(V4HI "V4HI") (V8HI "V4HI")
(V2SI "V4SI") (V4SI "V4SI")
(V2SF "V4SF") (V4SF "V4SF")
- (DI "OI") (V2DI "OI")])
+;; LLVM LOCAL
+ (V1DI "OI") (V2DI "OI")])
;; Register width from element mode
(define_mode_attr V_reg [(V8QI "P") (V16QI "q")
(V4HI "P") (V8HI "q")
(V2SI "P") (V4SI "q")
(V2SF "P") (V4SF "q")
- (DI "P") (V2DI "q")])
+;; LLVM LOCAL
+ (V1DI "P") (V2DI "q")])
;; Wider modes with the same number of elements.
(define_mode_attr V_widen [(V8QI "V8HI") (V4HI "V4SI") (V2SI "V2DI")])
@@ -308,55 +319,64 @@
;; Modes with half the number of equal-sized elements.
(define_mode_attr V_HALF [(V16QI "V8QI") (V8HI "V4HI")
(V4SI "V2SI") (V4SF "V2SF")
- (V2DI "DI")])
+;; LLVM LOCAL
+ (V2DI "V1DI")])
;; Same, but lower-case.
(define_mode_attr V_half [(V16QI "v8qi") (V8HI "v4hi")
(V4SI "v2si") (V4SF "v2sf")
- (V2DI "di")])
+;; LLVM LOCAL
+ (V2DI "v1di")])
;; Modes with twice the number of equal-sized elements.
(define_mode_attr V_DOUBLE [(V8QI "V16QI") (V4HI "V8HI")
(V2SI "V4SI") (V2SF "V4SF")
- (DI "V2DI")])
+;; LLVM LOCAL
+ (V1DI "V2DI")])
;; Same, but lower-case.
(define_mode_attr V_double [(V8QI "v16qi") (V4HI "v8hi")
(V2SI "v4si") (V2SF "v4sf")
- (DI "v2di")])
+;; LLVM LOCAL
+ (V1DI "v2di")])
;; Modes with double-width elements.
(define_mode_attr V_double_width [(V8QI "V4HI") (V16QI "V8HI")
(V4HI "V2SI") (V8HI "V4SI")
- (V2SI "DI") (V4SI "V2DI")])
+;; LLVM LOCAL
+ (V2SI "V1DI") (V4SI "V2DI")])
;; Mode of result of comparison operations (and bit-select operand 1).
(define_mode_attr V_cmp_result [(V8QI "V8QI") (V16QI "V16QI")
(V4HI "V4HI") (V8HI "V8HI")
(V2SI "V2SI") (V4SI "V4SI")
(V2SF "V2SI") (V4SF "V4SI")
- (DI "DI") (V2DI "V2DI")])
+;; LLVM LOCAL
+ (V1DI "V1DI") (V2DI "V2DI")])
;; Get element type from double-width mode, for operations where we don't care
;; about signedness.
(define_mode_attr V_if_elem [(V8QI "i8") (V16QI "i8")
(V4HI "i16") (V8HI "i16")
(V2SI "i32") (V4SI "i32")
- (DI "i64") (V2DI "i64")
+;; LLVM LOCAL
+ (V1DI "i64") (V2DI "i64")
(V2SF "f32") (V4SF "f32")])
;; Same, but for operations which work on signed values.
(define_mode_attr V_s_elem [(V8QI "s8") (V16QI "s8")
(V4HI "s16") (V8HI "s16")
(V2SI "s32") (V4SI "s32")
- (DI "s64") (V2DI "s64")
+;; LLVM LOCAL
+ (V1DI "s64") (V2DI "s64")
(V2SF "f32") (V4SF "f32")])
;; Same, but for operations which work on unsigned values.
(define_mode_attr V_u_elem [(V8QI "u8") (V16QI "u8")
(V4HI "u16") (V8HI "u16")
(V2SI "u32") (V4SI "u32")
- (DI "u64") (V2DI "u64")
+;; LLVM LOCAL
+ (V1DI "u64") (V2DI "u64")
(V2SF "f32") (V4SF "f32")])
;; Element types for extraction of unsigned scalars.
@@ -368,7 +388,8 @@
(define_mode_attr V_sz_elem [(V8QI "8") (V16QI "8")
(V4HI "16") (V8HI "16")
(V2SI "32") (V4SI "32")
- (DI "64") (V2DI "64")
+;; LLVM LOCAL
+ (V1DI "64") (V2DI "64")
(V2SF "32") (V4SF "32")])
;; Element sizes for duplicating ARM registers to all elements of a vector.
@@ -379,14 +400,16 @@
(V4HI "TI") (V8HI "OI")
(V2SI "TI") (V4SI "OI")
(V2SF "TI") (V4SF "OI")
- (DI "TI") (V2DI "OI")])
+;; LLVM LOCAL
+ (V1DI "TI") (V2DI "OI")])
;; Same, but lower-case.
(define_mode_attr V_pair [(V8QI "ti") (V16QI "oi")
(V4HI "ti") (V8HI "oi")
(V2SI "ti") (V4SI "oi")
(V2SF "ti") (V4SF "oi")
- (DI "ti") (V2DI "oi")])
+;; LLVM LOCAL
+ (V1DI "ti") (V2DI "oi")])
;; Operations on two halves of a quadword vector.
(define_code_macro vqh_ops [plus smin smax umin umax])
@@ -408,7 +431,8 @@
(V4HI "") (V8HI "")
(V2SI "") (V4SI "")
(V2SF "") (V4SF "")
- (DI "_neon") (V2DI "")])
+;; LLVM LOCAL
+ (V1DI "") (V2DI "")])
;; Scalars to be presented to scalar multiplication instructions
;; must satisfy the following constraints.
@@ -496,27 +520,31 @@
(define_mode_attr Is_float_mode [(V8QI "false") (V16QI "false")
(V4HI "false") (V8HI "false")
(V2SI "false") (V4SI "false")
- (V2SF "true") (V4SF "true")
- (DI "false") (V2DI "false")])
+ (V2SF "true") (V4SF "true")
+;; LLVM LOCAL
+ (V1DI "false") (V2DI "false")])
-(define_mode_attr Scalar_mul_8_16 [(V8QI "true") (V16QI "true")
- (V4HI "true") (V8HI "true")
+(define_mode_attr Scalar_mul_8_16 [(V8QI "true") (V16QI "true")
+ (V4HI "true") (V8HI "true")
(V2SI "false") (V4SI "false")
(V2SF "false") (V4SF "false")
- (DI "false") (V2DI "false")])
+;; LLVM LOCAL
+ (V1DI "false") (V2DI "false")])
(define_mode_attr Is_d_reg [(V8QI "true") (V16QI "false")
(V4HI "true") (V8HI "false")
(V2SI "true") (V4SI "false")
(V2SF "true") (V4SF "false")
- (DI "true") (V2DI "false")])
+;; LLVM LOCAL
+ (V1DI "true") (V2DI "false")])
(define_mode_attr V_mode_nunits [(V8QI "8") (V16QI "16")
(V4HI "4") (V8HI "8")
(V2SI "2") (V4SI "4")
(V2SF "2") (V4SF "4")
- (DI "1") (V2DI "2")])
+;; LLVM LOCAL
+ (V1DI "1") (V2DI "2")])
;; FIXME: Attributes are probably borked.
(define_insn "*neon_mov<mode>"
@@ -866,11 +894,8 @@
;; Doubleword and quadword arithmetic.
-;; NOTE: vadd/vsub and some other instructions also support 64-bit integer
-;; element size, which we could potentially use for "long long" operations. We
-;; don't want to do this at present though, because moving values from the
-;; vector unit to the ARM core is currently slow and 64-bit addition (etc.) is
-;; easy to do with ARM instructions anyway.
+;; LLVM LOCAL begin
+;; LLVM LOCAL end
(define_insn "*add<mode>3_neon"
[(set (match_operand:VDQ 0 "s_register_operand" "=w")
@@ -938,23 +963,8 @@
[(set_attr "neon_type" "neon_int_1")]
)
-(define_insn "iordi3_neon"
- [(set (match_operand:DI 0 "s_register_operand" "=w,w")
- (unspec:DI [(match_operand:DI 1 "s_register_operand" "w,0")
- (match_operand:DI 2 "neon_logic_op2" "w,Dl")]
- UNSPEC_VORR))]
- "TARGET_NEON"
-{
- switch (which_alternative)
- {
- case 0: return "vorr\t%P0, %P1, %P2";
- case 1: return neon_output_logic_immediate ("vorr", &operands[2],
- DImode, 0, VALID_NEON_QREG_MODE (DImode));
- default: gcc_unreachable ();
- }
-}
- [(set_attr "neon_type" "neon_int_1")]
-)
+;; LLVM LOCAL begin
+;; LLVM LOCAL end
;; The concrete forms of the Neon immediate-logic instructions are vbic and
;; vorr. We support the pseudo-instruction vand instead, because that
@@ -978,23 +988,8 @@
[(set_attr "neon_type" "neon_int_1")]
)
-(define_insn "anddi3_neon"
- [(set (match_operand:DI 0 "s_register_operand" "=w,w")
- (unspec:DI [(match_operand:DI 1 "s_register_operand" "w,0")
- (match_operand:DI 2 "neon_inv_logic_op2" "w,DL")]
- UNSPEC_VAND))]
- "TARGET_NEON"
-{
- switch (which_alternative)
- {
- case 0: return "vand\t%P0, %P1, %P2";
- case 1: return neon_output_logic_immediate ("vand", &operands[2],
- DImode, 1, VALID_NEON_QREG_MODE (DImode));
- default: gcc_unreachable ();
- }
-}
- [(set_attr "neon_type" "neon_int_1")]
-)
+;; LLVM LOCAL begin
+;; LLVM LOCAL end
(define_insn "orn<mode>3_neon"
[(set (match_operand:VDQ 0 "s_register_operand" "=w")
@@ -1005,15 +1000,8 @@
[(set_attr "neon_type" "neon_int_1")]
)
-(define_insn "orndi3_neon"
- [(set (match_operand:DI 0 "s_register_operand" "=w")
- (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
- (match_operand:DI 2 "s_register_operand" "w")]
- UNSPEC_VORN))]
- "TARGET_NEON"
- "vorn\t%P0, %P1, %P2"
- [(set_attr "neon_type" "neon_int_1")]
-)
+;; LLVM LOCAL begin
+;; LLVM LOCAL end
(define_insn "bic<mode>3_neon"
[(set (match_operand:VDQ 0 "s_register_operand" "=w")
@@ -1024,15 +1012,8 @@
[(set_attr "neon_type" "neon_int_1")]
)
-(define_insn "bicdi3_neon"
- [(set (match_operand:DI 0 "s_register_operand" "=w")
- (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
- (match_operand:DI 2 "s_register_operand" "w")]
- UNSPEC_VBIC))]
- "TARGET_NEON"
- "vbic\t%P0, %P1, %P2"
- [(set_attr "neon_type" "neon_int_1")]
-)
+;; LLVM LOCAL begin
+;; LLVM LOCAL end
(define_insn "xor<mode>3"
[(set (match_operand:VDQ 0 "s_register_operand" "=w")
@@ -1043,15 +1024,8 @@
[(set_attr "neon_type" "neon_int_1")]
)
-(define_insn "xordi3_neon"
- [(set (match_operand:DI 0 "s_register_operand" "=w")
- (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
- (match_operand:DI 2 "s_register_operand" "w")]
- UNSPEC_VEOR))]
- "TARGET_NEON"
- "veor\t%P0, %P1, %P2"
- [(set_attr "neon_type" "neon_int_1")]
-)
+;; LLVM LOCAL begin
+;; LLVM LOCAL end
(define_insn "one_cmpl<mode>2"
[(set (match_operand:VDQ 0 "s_register_operand" "=w")
@@ -1478,6 +1452,17 @@
DONE;
})
+;; LLVM LOCAL begin
+(define_insn "reduc_splus_v1di"
+ [(set (match_operand:V1DI 0 "s_register_operand" "=w")
+ (unspec:V1DI [(match_operand:V1DI 1 "s_register_operand" "w")]
+ UNSPEC_VPADD))]
+ "TARGET_NEON"
+ "vadd.i64\t%P0, %e1, %f1"
+ [(set_attr "neon_type" "neon_int_1")]
+)
+;; LLVM LOCAL end
+
(define_insn "reduc_splus_v2di"
[(set (match_operand:V2DI 0 "s_register_operand" "=w")
(unspec:V2DI [(match_operand:V2DI 1 "s_register_operand" "w")]
@@ -2452,9 +2437,10 @@
; with this insn. Operand 3 (info word) is ignored because it does nothing
; useful with 64-bit elements.
-(define_insn "neon_vget_lanedi"
+;; LLVM LOCAL begin
+(define_insn "neon_vget_lanev1di"
[(set (match_operand:DI 0 "s_register_operand" "=r")
- (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
+ (unspec:DI [(match_operand:V1DI 1 "s_register_operand" "w")
(match_operand:SI 2 "immediate_operand" "i")
(match_operand:SI 3 "immediate_operand" "i")]
UNSPEC_VGET_LANE))]
@@ -2463,6 +2449,7 @@
[(set_attr "predicable" "yes")
(set_attr "neon_type" "neon_bp_simple")]
)
+;; LLVM LOCAL end
(define_insn "neon_vget_lane<mode>"
[(set (match_operand:<V_elem> 0 "s_register_operand" "=r")
@@ -2525,17 +2512,19 @@
; See neon_vget_lanedi comment for reasons operands 2 & 3 are ignored.
-(define_insn "neon_vset_lanedi"
- [(set (match_operand:DI 0 "s_register_operand" "=w")
- (unspec:DI [(match_operand:DI 1 "s_register_operand" "r")
- (match_operand:DI 2 "s_register_operand" "0")
- (match_operand:SI 3 "immediate_operand" "i")]
+;; LLVM LOCAL begin
+(define_insn "neon_vset_lanev1di"
+ [(set (match_operand:V1DI 0 "s_register_operand" "=w")
+ (unspec:V1DI [(match_operand:DI 1 "s_register_operand" "r")
+ (match_operand:V1DI 2 "s_register_operand" "0")
+ (match_operand:SI 3 "immediate_operand" "i")]
UNSPEC_VSET_LANE))]
"TARGET_NEON"
"vmov%?\t%P0, %Q1, %R1 @ di"
[(set_attr "predicable" "yes")
(set_attr "neon_type" "neon_bp_simple")]
)
+;; LLVM LOCAL end
(define_insn "neon_vset_lane<mode>"
[(set (match_operand:VQ 0 "s_register_operand" "=w")
@@ -2604,15 +2593,17 @@
(set_attr "neon_type" "neon_bp_simple")]
)
-(define_insn "neon_vdup_ndi"
- [(set (match_operand:DI 0 "s_register_operand" "=w")
- (unspec:DI [(match_operand:DI 1 "s_register_operand" "r")]
- UNSPEC_VDUP_N))]
+;; LLVM LOCAL begin
+(define_insn "neon_vdup_nv1di"
+ [(set (match_operand:V1DI 0 "s_register_operand" "=w")
+ (unspec:V1DI [(match_operand:DI 1 "s_register_operand" "r")]
+ UNSPEC_VDUP_N))]
"TARGET_NEON"
"vmov%?\t%P0, %Q1, %R1"
[(set_attr "predicable" "yes")
(set_attr "neon_type" "neon_bp_simple")]
)
+;; LLVM LOCAL end
(define_insn "neon_vdup_nv2di"
[(set (match_operand:V2DI 0 "s_register_operand" "=w")
@@ -2648,21 +2639,24 @@
)
; Scalar index is ignored, since only zero is valid here.
-(define_expand "neon_vdup_lanedi"
- [(set (match_operand:DI 0 "s_register_operand" "=w")
- (unspec:DI [(match_operand:DI 1 "s_register_operand" "w")
- (match_operand:SI 2 "immediate_operand" "i")]
- UNSPEC_VDUP_LANE))]
+;; LLVM LOCAL begin
+(define_expand "neon_vdup_lanev1di"
+ [(set (match_operand:V1DI 0 "s_register_operand" "=w")
+ (unspec:V1DI [(match_operand:V1DI 1 "s_register_operand" "w")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_VDUP_LANE))]
"TARGET_NEON"
{
emit_move_insn (operands[0], operands[1]);
DONE;
})
+;; LLVM LOCAL end
; Likewise.
(define_insn "neon_vdup_lanev2di"
[(set (match_operand:V2DI 0 "s_register_operand" "=w")
- (unspec:V2DI [(match_operand:DI 1 "s_register_operand" "w")
+;; LLVM LOCAL
+ (unspec:V2DI [(match_operand:V1DI 1 "s_register_operand" "w")
(match_operand:SI 2 "immediate_operand" "i")]
UNSPEC_VDUP_LANE))]
"TARGET_NEON"
@@ -3809,14 +3803,16 @@
DONE;
})
-(define_expand "neon_vreinterpretdi<mode>"
- [(match_operand:DI 0 "s_register_operand" "")
+;; LLVM LOCAL begin
+(define_expand "neon_vreinterpretv1di<mode>"
+ [(match_operand:V1DI 0 "s_register_operand" "")
(match_operand:VDX 1 "s_register_operand" "")]
"TARGET_NEON"
{
neon_reinterpret (operands[0], operands[1]);
DONE;
})
+;; LLVM LOCAL end
(define_expand "neon_vreinterpretv16qi<mode>"
[(match_operand:V16QI 0 "s_register_operand" "")
Modified: llvm-gcc-4.2/trunk/gcc/config/arm/neon.ml
URL: http://llvm.org/viewvc/llvm-project/llvm-gcc-4.2/trunk/gcc/config/arm/neon.ml?rev=80638&r1=80637&r2=80638&view=diff
==============================================================================
--- llvm-gcc-4.2/trunk/gcc/config/arm/neon.ml (original)
+++ llvm-gcc-4.2/trunk/gcc/config/arm/neon.ml Mon Aug 31 18:16:02 2009
@@ -104,9 +104,10 @@
| Arity3 of vectype * vectype * vectype * vectype
| Arity4 of vectype * vectype * vectype * vectype * vectype
-type vecmode = V8QI | V4HI | V2SI | V2SF | DI
+(* LLVM LOCAL *)
+type vecmode = V8QI | V4HI | V2SI | V2SF | V1DI
| V16QI | V8HI | V4SI | V4SF | V2DI
- | QI | HI | SI | SF
+ | QI | HI | SI | SF | DI
type opcode =
(* Binary ops. *)
@@ -355,7 +356,8 @@
in match shape with
All (_, Dreg) | By_scalar Dreg | Pair_result Dreg | Unary_scalar Dreg
| Binary_imm Dreg | Long_noreg Dreg | Wide_noreg Dreg ->
- [| V8QI; V4HI; if flt then V2SF else V2SI; DI |].(idx)
+(* LLVM LOCAL *)
+ [| V8QI; V4HI; if flt then V2SF else V2SI; V1DI |].(idx)
| All (_, Qreg) | By_scalar Qreg | Pair_result Qreg | Unary_scalar Qreg
| Binary_imm Qreg | Long_noreg Qreg | Wide_noreg Qreg ->
[| V16QI; V8HI; if flt then V4SF else V4SI; V2DI |].(idx)
@@ -363,7 +365,8 @@
[| QI; HI; if flt then SF else SI; DI |].(idx)
| Long | Wide | Wide_lane | Wide_scalar
| Long_imm ->
- [| V8QI; V4HI; V2SI; DI |].(idx)
+(* LLVM LOCAL *)
+ [| V8QI; V4HI; V2SI; V1DI |].(idx)
| Narrow | Narrow_imm -> [| V16QI; V8HI; V4SI; V2DI |].(idx)
| Use_operands ops -> mode_of_elt elt (All (0, (find_key_operand ops)))
| _ -> failwith "invalid shape"
@@ -481,7 +484,8 @@
| T_int16x8 | T_uint16x8 | T_poly16x8 -> V8HI
| T_int32x2 | T_uint32x2 -> V2SI
| T_int32x4 | T_uint32x4 -> V4SI
- | T_int64x1 | T_uint64x1 -> DI
+(* LLVM LOCAL *)
+ | T_int64x1 | T_uint64x1 -> V1DI
| T_int64x2 | T_uint64x2 -> V2DI
| T_float32x2 -> V2SF
| T_float32x4 -> V4SF
@@ -497,17 +501,20 @@
4, V8QI -> B_TId8mode
| 4, V4HI -> B_TId16mode
| 4, V2SI -> B_TId32mode
- | 4, DI -> B_TId64mode
+(* LLVM LOCAL *)
+ | 4, V1DI -> B_TId64mode
| 4, V2SF -> B_TIdSFmode
| 6, V8QI -> B_EId8mode
| 6, V4HI -> B_EId16mode
| 6, V2SI -> B_EId32mode
- | 6, DI -> B_EId64mode
+(* LLVM LOCAL *)
+ | 6, V1DI -> B_EId64mode
| 6, V2SF -> B_EIdSFmode
| 8, V8QI -> B_OId8mode
| 8, V4HI -> B_OId16mode
| 8, V2SI -> B_OId32mode
- | 8, DI -> B_OId64mode
+(* LLVM LOCAL *)
+ | 8, V1DI -> B_OId64mode
| 8, V2SF -> B_OIdSFmode
| 8, V16QI -> B_OIq8mode
| 8, V8HI -> B_OIq16mode
@@ -1807,7 +1814,8 @@
V8QI -> "v8qi" | V4HI -> "v4hi" | V2SI -> "v2si" | V2SF -> "v2sf"
| DI -> "di" | V16QI -> "v16qi" | V8HI -> "v8hi" | V4SI -> "v4si"
| V4SF -> "v4sf" | V2DI -> "v2di" | QI -> "qi" | HI -> "hi" | SI -> "si"
- | SF -> "sf"
+(* LLVM LOCAL *)
+ | SF -> "sf" | V1DI -> "v1di"
(* Use uppercase chars for letters which form part of the intrinsic name, but
should be omitted from the builtin name (the info is passed in an extra
More information about the llvm-commits
mailing list