[Lldb-commits] [lldb] r255697 - First pass at LLDBRPC.framework
Greg Clayton via lldb-commits
lldb-commits at lists.llvm.org
Tue Dec 15 15:03:24 PST 2015
Added: lldb/trunk/build/Debug/LLDB.framework/Versions/A/Resources/Clang/include/arm_neon.h
URL: http://llvm.org/viewvc/llvm-project/lldb/trunk/build/Debug/LLDB.framework/Versions/A/Resources/Clang/include/arm_neon.h?rev=255697&view=auto
==============================================================================
--- lldb/trunk/build/Debug/LLDB.framework/Versions/A/Resources/Clang/include/arm_neon.h (added)
+++ lldb/trunk/build/Debug/LLDB.framework/Versions/A/Resources/Clang/include/arm_neon.h Tue Dec 15 17:03:22 2015
@@ -0,0 +1,69237 @@
+/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_NEON_H
+#define __ARM_NEON_H
+
+#if !defined(__ARM_NEON)
+#error "NEON support not enabled"
+#endif
+
+#include <stdint.h>
+
+typedef float float32_t;
+typedef __fp16 float16_t;
+#ifdef __aarch64__
+typedef double float64_t;
+#endif
+
+#ifdef __aarch64__
+typedef uint8_t poly8_t;
+typedef uint16_t poly16_t;
+typedef uint64_t poly64_t;
+typedef __uint128_t poly128_t;
+#else
+typedef int8_t poly8_t;
+typedef int16_t poly16_t;
+#endif
+typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
+typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
+typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
+typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
+typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
+typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
+typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
+typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
+typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
+typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
+typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
+typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
+typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
+typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
+typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
+typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
+typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
+typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
+typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
+typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
+#ifdef __aarch64__
+typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
+typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
+#endif
+typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
+typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
+typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
+typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
+#ifdef __aarch64__
+typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
+typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
+#endif
+
+typedef struct int8x8x2_t {
+ int8x8_t val[2];
+} int8x8x2_t;
+
+typedef struct int8x16x2_t {
+ int8x16_t val[2];
+} int8x16x2_t;
+
+typedef struct int16x4x2_t {
+ int16x4_t val[2];
+} int16x4x2_t;
+
+typedef struct int16x8x2_t {
+ int16x8_t val[2];
+} int16x8x2_t;
+
+typedef struct int32x2x2_t {
+ int32x2_t val[2];
+} int32x2x2_t;
+
+typedef struct int32x4x2_t {
+ int32x4_t val[2];
+} int32x4x2_t;
+
+typedef struct int64x1x2_t {
+ int64x1_t val[2];
+} int64x1x2_t;
+
+typedef struct int64x2x2_t {
+ int64x2_t val[2];
+} int64x2x2_t;
+
+typedef struct uint8x8x2_t {
+ uint8x8_t val[2];
+} uint8x8x2_t;
+
+typedef struct uint8x16x2_t {
+ uint8x16_t val[2];
+} uint8x16x2_t;
+
+typedef struct uint16x4x2_t {
+ uint16x4_t val[2];
+} uint16x4x2_t;
+
+typedef struct uint16x8x2_t {
+ uint16x8_t val[2];
+} uint16x8x2_t;
+
+typedef struct uint32x2x2_t {
+ uint32x2_t val[2];
+} uint32x2x2_t;
+
+typedef struct uint32x4x2_t {
+ uint32x4_t val[2];
+} uint32x4x2_t;
+
+typedef struct uint64x1x2_t {
+ uint64x1_t val[2];
+} uint64x1x2_t;
+
+typedef struct uint64x2x2_t {
+ uint64x2_t val[2];
+} uint64x2x2_t;
+
+typedef struct float16x4x2_t {
+ float16x4_t val[2];
+} float16x4x2_t;
+
+typedef struct float16x8x2_t {
+ float16x8_t val[2];
+} float16x8x2_t;
+
+typedef struct float32x2x2_t {
+ float32x2_t val[2];
+} float32x2x2_t;
+
+typedef struct float32x4x2_t {
+ float32x4_t val[2];
+} float32x4x2_t;
+
+#ifdef __aarch64__
+typedef struct float64x1x2_t {
+ float64x1_t val[2];
+} float64x1x2_t;
+
+typedef struct float64x2x2_t {
+ float64x2_t val[2];
+} float64x2x2_t;
+
+#endif
+typedef struct poly8x8x2_t {
+ poly8x8_t val[2];
+} poly8x8x2_t;
+
+typedef struct poly8x16x2_t {
+ poly8x16_t val[2];
+} poly8x16x2_t;
+
+typedef struct poly16x4x2_t {
+ poly16x4_t val[2];
+} poly16x4x2_t;
+
+typedef struct poly16x8x2_t {
+ poly16x8_t val[2];
+} poly16x8x2_t;
+
+#ifdef __aarch64__
+typedef struct poly64x1x2_t {
+ poly64x1_t val[2];
+} poly64x1x2_t;
+
+typedef struct poly64x2x2_t {
+ poly64x2_t val[2];
+} poly64x2x2_t;
+
+#endif
+typedef struct int8x8x3_t {
+ int8x8_t val[3];
+} int8x8x3_t;
+
+typedef struct int8x16x3_t {
+ int8x16_t val[3];
+} int8x16x3_t;
+
+typedef struct int16x4x3_t {
+ int16x4_t val[3];
+} int16x4x3_t;
+
+typedef struct int16x8x3_t {
+ int16x8_t val[3];
+} int16x8x3_t;
+
+typedef struct int32x2x3_t {
+ int32x2_t val[3];
+} int32x2x3_t;
+
+typedef struct int32x4x3_t {
+ int32x4_t val[3];
+} int32x4x3_t;
+
+typedef struct int64x1x3_t {
+ int64x1_t val[3];
+} int64x1x3_t;
+
+typedef struct int64x2x3_t {
+ int64x2_t val[3];
+} int64x2x3_t;
+
+typedef struct uint8x8x3_t {
+ uint8x8_t val[3];
+} uint8x8x3_t;
+
+typedef struct uint8x16x3_t {
+ uint8x16_t val[3];
+} uint8x16x3_t;
+
+typedef struct uint16x4x3_t {
+ uint16x4_t val[3];
+} uint16x4x3_t;
+
+typedef struct uint16x8x3_t {
+ uint16x8_t val[3];
+} uint16x8x3_t;
+
+typedef struct uint32x2x3_t {
+ uint32x2_t val[3];
+} uint32x2x3_t;
+
+typedef struct uint32x4x3_t {
+ uint32x4_t val[3];
+} uint32x4x3_t;
+
+typedef struct uint64x1x3_t {
+ uint64x1_t val[3];
+} uint64x1x3_t;
+
+typedef struct uint64x2x3_t {
+ uint64x2_t val[3];
+} uint64x2x3_t;
+
+typedef struct float16x4x3_t {
+ float16x4_t val[3];
+} float16x4x3_t;
+
+typedef struct float16x8x3_t {
+ float16x8_t val[3];
+} float16x8x3_t;
+
+typedef struct float32x2x3_t {
+ float32x2_t val[3];
+} float32x2x3_t;
+
+typedef struct float32x4x3_t {
+ float32x4_t val[3];
+} float32x4x3_t;
+
+#ifdef __aarch64__
+typedef struct float64x1x3_t {
+ float64x1_t val[3];
+} float64x1x3_t;
+
+typedef struct float64x2x3_t {
+ float64x2_t val[3];
+} float64x2x3_t;
+
+#endif
+typedef struct poly8x8x3_t {
+ poly8x8_t val[3];
+} poly8x8x3_t;
+
+typedef struct poly8x16x3_t {
+ poly8x16_t val[3];
+} poly8x16x3_t;
+
+typedef struct poly16x4x3_t {
+ poly16x4_t val[3];
+} poly16x4x3_t;
+
+typedef struct poly16x8x3_t {
+ poly16x8_t val[3];
+} poly16x8x3_t;
+
+#ifdef __aarch64__
+typedef struct poly64x1x3_t {
+ poly64x1_t val[3];
+} poly64x1x3_t;
+
+typedef struct poly64x2x3_t {
+ poly64x2_t val[3];
+} poly64x2x3_t;
+
+#endif
+typedef struct int8x8x4_t {
+ int8x8_t val[4];
+} int8x8x4_t;
+
+typedef struct int8x16x4_t {
+ int8x16_t val[4];
+} int8x16x4_t;
+
+typedef struct int16x4x4_t {
+ int16x4_t val[4];
+} int16x4x4_t;
+
+typedef struct int16x8x4_t {
+ int16x8_t val[4];
+} int16x8x4_t;
+
+typedef struct int32x2x4_t {
+ int32x2_t val[4];
+} int32x2x4_t;
+
+typedef struct int32x4x4_t {
+ int32x4_t val[4];
+} int32x4x4_t;
+
+typedef struct int64x1x4_t {
+ int64x1_t val[4];
+} int64x1x4_t;
+
+typedef struct int64x2x4_t {
+ int64x2_t val[4];
+} int64x2x4_t;
+
+typedef struct uint8x8x4_t {
+ uint8x8_t val[4];
+} uint8x8x4_t;
+
+typedef struct uint8x16x4_t {
+ uint8x16_t val[4];
+} uint8x16x4_t;
+
+typedef struct uint16x4x4_t {
+ uint16x4_t val[4];
+} uint16x4x4_t;
+
+typedef struct uint16x8x4_t {
+ uint16x8_t val[4];
+} uint16x8x4_t;
+
+typedef struct uint32x2x4_t {
+ uint32x2_t val[4];
+} uint32x2x4_t;
+
+typedef struct uint32x4x4_t {
+ uint32x4_t val[4];
+} uint32x4x4_t;
+
+typedef struct uint64x1x4_t {
+ uint64x1_t val[4];
+} uint64x1x4_t;
+
+typedef struct uint64x2x4_t {
+ uint64x2_t val[4];
+} uint64x2x4_t;
+
+typedef struct float16x4x4_t {
+ float16x4_t val[4];
+} float16x4x4_t;
+
+typedef struct float16x8x4_t {
+ float16x8_t val[4];
+} float16x8x4_t;
+
+typedef struct float32x2x4_t {
+ float32x2_t val[4];
+} float32x2x4_t;
+
+typedef struct float32x4x4_t {
+ float32x4_t val[4];
+} float32x4x4_t;
+
+#ifdef __aarch64__
+typedef struct float64x1x4_t {
+ float64x1_t val[4];
+} float64x1x4_t;
+
+typedef struct float64x2x4_t {
+ float64x2_t val[4];
+} float64x2x4_t;
+
+#endif
+typedef struct poly8x8x4_t {
+ poly8x8_t val[4];
+} poly8x8x4_t;
+
+typedef struct poly8x16x4_t {
+ poly8x16_t val[4];
+} poly8x16x4_t;
+
+typedef struct poly16x4x4_t {
+ poly16x4_t val[4];
+} poly16x4x4_t;
+
+typedef struct poly16x8x4_t {
+ poly16x8_t val[4];
+} poly16x8x4_t;
+
+#ifdef __aarch64__
+typedef struct poly64x1x4_t {
+ poly64x1_t val[4];
+} poly64x1x4_t;
+
+typedef struct poly64x2x4_t {
+ poly64x2_t val[4];
+} poly64x2x4_t;
+
+#endif
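(The int8x8x2_t-style aggregates above are plain structs wrapping an array of two, three, or four vectors; the de-interleaving load/store intrinsics defined further down in the full header traffic in them. A minimal usage sketch, an illustration only and not part of the committed file:

    #include <arm_neon.h>

    /* vld2_u8 reads 16 interleaved bytes and splits them into two 8-lane
       vectors, returned via the .val[] array of a uint8x8x2_t. */
    void split_even_odd(const uint8_t *src, uint8_t *even, uint8_t *odd) {
      uint8x8x2_t pair = vld2_u8(src);
      vst1_u8(even, pair.val[0]);
      vst1_u8(odd,  pair.val[1]);
    }
)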
+
+#define __ai static inline __attribute__((__always_inline__, __nodebug__))
+
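(Every intrinsic below is declared with __ai, i.e. as a static, always-inlined function with debug info suppressed, so the header can be included in any number of translation units without emitting out-of-line copies or clashing symbols. After preprocessing, a definition such as the little-endian vadd_u8 further down is simply:

    static inline __attribute__((__always_inline__, __nodebug__))
    uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
      uint8x8_t __ret;
      __ret = __p0 + __p1;   /* element-wise add via Clang's vector extension */
      return __ret;
    }
)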
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#endif
+
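(Each intrinsic from here on follows the same template: the little-endian body calls the Clang builtin directly, the big-endian body reverses the lanes with __builtin_shufflevector on the way in and out so the lane order the builtin expects matches what the programmer sees, and the extra __noswap_ variant lets other big-endian bodies reuse the operation on values whose lanes are already reversed. From the caller's point of view the swapping is invisible; a small self-contained example, with values chosen here for illustration rather than taken from the commit:

    #include <stdio.h>
    #include <arm_neon.h>

    int main(void) {
      uint8_t a_bytes[16] = { 10, 20, 30, 40, 50, 60, 70, 80,
                              90, 100, 110, 120, 130, 140, 150, 160 };
      uint8_t b_bytes[16] = { 15, 15, 35, 35, 55, 55, 75, 75,
                              95, 95, 115, 115, 135, 135, 155, 155 };
      uint8x16_t a = vld1q_u8(a_bytes);   /* vld1q_u8 appears later in the full header */
      uint8x16_t b = vld1q_u8(b_bytes);
      uint8x16_t d = vabdq_u8(a, b);      /* per-lane |a - b| */
      uint8_t out[16];
      vst1q_u8(out, d);
      printf("%u %u\n", out[0], out[1]);  /* prints "5 5" */
      return 0;
    }
)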
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vabsq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vabsq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vabsq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vabsq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vabsq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vabsq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vabsq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vabsq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vabs_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vabs_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vabs_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vabs_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vabs_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vabs_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vabs_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vabs_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
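(Note that the vadd/vaddq family needs no builtin at all: Clang's vector extension defines the arithmetic operators element-wise on these types, so the body is literally __p0 + __p1, lane-reversed first on big-endian only to keep the template uniform, since addition is lane-independent. Under Clang the intrinsic and the bare operator are interchangeable, as this small sketch illustrates:

    #include <arm_neon.h>

    /* Both functions compute the same lane-wise sum; the intrinsic just wraps
       the operator so code stays portable to compilers without the extension. */
    uint32x4_t sum_intrinsic(uint32x4_t a, uint32x4_t b) { return vaddq_u32(a, b); }
    uint32x4_t sum_operator (uint32x4_t a, uint32x4_t b) { return a + b; }
)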
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 + __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 + __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#endif
+
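(The vaddhn_* definitions above implement "add and narrow, returning the high half": corresponding wide lanes are added and only the most significant half of each sum is kept, halving the element width. One lane of vaddhn_u32 behaves like this scalar model, shown for illustration only:

    #include <stdint.h>

    /* One lane of vaddhn_u32: 32-bit wrapping add, keep bits 31..16. */
    static uint16_t addhn_lane_u32(uint32_t a, uint32_t b) {
      return (uint16_t)((a + b) >> 16);
    }
    /* e.g. addhn_lane_u32(0x00012345u, 0x0000BCBAu) == 0x0001 */
)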
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 & __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 & __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
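(vbic/vbicq, "bit clear", computes __p0 & ~__p1: it clears in the first operand every bit that is set in the second. Per lane it is equivalent to this scalar expression, shown for illustration only:

    #include <stdint.h>

    /* One 32-bit lane of vbicq_u32: keep a's bits only where b is 0. */
    static uint32_t bic_lane_u32(uint32_t a, uint32_t b) {
      return a & ~b;
    }
)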
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 & ~__p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 & ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
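(vbsl/vbslq, here and in the variants that follow, is a bitwise select: wherever a bit of the first (mask) operand is 1 the result takes that bit from the second operand, and wherever it is 0 the bit comes from the third. A scalar model of one 8-bit lane, for illustration only:

    #include <stdint.h>

    /* One 8-bit lane of vbsl: mask-controlled merge of b and c. */
    static uint8_t bsl_lane(uint8_t mask, uint8_t b, uint8_t c) {
      return (uint8_t)((mask & b) | (~mask & c));
    }
)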
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
+ return __ret;
+}
+#else
+__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
+ return __ret;
+}
+#else
+__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
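+/* vcage (absolute compare greater than or equal): result lanes are all ones where
+ * |__p0| >= |__p1| and zero otherwise. */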
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
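+/* vcagt (absolute compare greater than): result lanes are all ones where |__p0| > |__p1|. */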
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
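+/* vcale (absolute compare less than or equal): result lanes are all ones where |__p0| <= |__p1|. */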
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
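+/* vcalt (absolute compare less than): result lanes are all ones where |__p0| < |__p1|. */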
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
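+/* vceq (compare equal): implemented with the vector == operator; each result lane is all
+ * ones where the corresponding input lanes compare equal and zero otherwise. */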
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 == __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 == __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
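+/* vcge (compare greater than or equal): lane-wise >=, producing all-ones/zero mask lanes. */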
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 >= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 >= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
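+/* vcgt (compare greater than): lane-wise >, producing all-ones/zero mask lanes. */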
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 > __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 > __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
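+/* vcle (compare less than or equal): lane-wise <=, producing all-ones/zero mask lanes. */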
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 <= __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 <= __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
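+/* vcls (count leading sign bits): for each lane, counts the consecutive bits below the
+ * sign bit that match the sign bit. */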
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vclsq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vclsq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vclsq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vclsq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vclsq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vclsq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vcls_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vcls_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcls_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vcls_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcls_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vcls_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
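+
+/* Usage sketch (illustrative only): vcls counts, per lane, how many bits
+ * directly below the sign bit match it; for int8 lanes holding 1
+ * (0b00000001) the result is 6, and for lanes holding 0 or -1 it is 7.
+ *
+ *   int8x8_t n = vcls_s8(x);   // x is any int8x8_t value
+ */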
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0 < __p1);
+ return __ret;
+}
+#else
+__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__rev0 < __rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
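+
+/* Usage sketch (illustrative only): like the other compares, vclt returns a
+ * per-lane mask suitable for bitwise selection, e.g.
+ *
+ *   uint16x8_t m = vcltq_s16(a, b);   // m[i] == 0xFFFF when a[i] < b[i]
+ */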
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vclzq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vclzq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vclzq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vclzq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vclzq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vclzq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vclz_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vclz_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vclz_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vclz_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vclz_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vclz_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
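+
+/* Usage sketch (illustrative only): vclz counts leading zero bits per lane;
+ * a uint32 lane holding 1 gives 31 and a lane holding 0 gives 32.
+ *
+ *   uint32x2_t n = vclz_u32(x);   // x is any uint32x2_t value
+ */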
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vcntq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vcntq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vcnt_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vcnt_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
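+
+/* Usage sketch (illustrative only): vcnt is a per-byte population count;
+ * a lane holding 0xF0 gives 4 and a lane holding 0xFF gives 8.
+ *
+ *   uint8x8_t bits = vcnt_u8(x);   // x is any uint8x8_t value
+ */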
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
+ float16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ return __ret;
+}
+#else
+__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
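+
+/* Usage sketch (illustrative only): vcombine joins two 64-bit vectors into
+ * one 128-bit vector, with the first argument forming the low lanes, e.g.
+ *
+ *   uint8x16_t q = vcombine_u8(lo, hi);   // q[0..7] = lo[0..7], q[8..15] = hi[0..7]
+ */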
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vcreate_p8(uint64_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vcreate_p8(uint64_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vcreate_p16(uint64_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vcreate_p16(uint64_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vcreate_u8(uint64_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vcreate_u8(uint64_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcreate_u32(uint64_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcreate_u32(uint64_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vcreate_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vcreate_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vcreate_u16(uint64_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vcreate_u16(uint64_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vcreate_s8(uint64_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#else
+__ai int8x8_t vcreate_s8(uint64_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcreate_f32(uint64_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai float32x2_t vcreate_f32(uint64_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcreate_f16(uint64_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai float16x4_t vcreate_f16(uint64_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcreate_s32(uint64_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#else
+__ai int32x2_t vcreate_s32(uint64_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vcreate_s64(uint64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#else
+__ai int64x1_t vcreate_s64(uint64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t)(__p0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vcreate_s16(uint64_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#else
+__ai int16x4_t vcreate_s16(uint64_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t)(__p0);
+ return __ret;
+}
+#endif
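+
+/* Usage sketch (illustrative only, assuming a little-endian target): vcreate
+ * reinterprets the bits of a 64-bit scalar as a 64-bit vector, e.g.
+ *
+ *   uint32x2_t v = vcreate_u32(0x0000000200000001ULL);   // v[0] == 1, v[1] == 2
+ */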
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
+ return __ret;
+}
+#else
+__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
+ float16x4_t __ret;
+ __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
+ float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
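+
+/* Usage sketch (illustrative only): the float/integer conversions above
+ * truncate toward zero when producing integers, and the _n_ variants treat
+ * the integer side as fixed point with the given number of fractional bits,
+ * e.g.
+ *
+ *   int32x4_t   i = vcvtq_s32_f32(v);        // lanes holding 2.7f become 2
+ *   float32x4_t f = vcvtq_n_f32_s32(i, 8);   // divides each lane by 256.0f
+ */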
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x16_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
+ __ret; \
+})
+#else
+#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
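+
+/* Usage sketch (illustrative only): the vdup_lane family broadcasts one lane
+ * of the source vector to every lane of the result; the lane index must be a
+ * constant expression, which is why these forms are macros, e.g.
+ *
+ *   int32x4_t b = vdupq_lane_s32(v, 1);   // every lane == v[1]
+ */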
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vdupq_n_s8(int8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int8x16_t vdupq_n_s8(int8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vdupq_n_f32(float32_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai float32x4_t vdupq_n_f32(float32_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdupq_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
+ __ret; \
+})
+#else
+#define vdupq_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vdupq_n_s32(int32_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int32x4_t vdupq_n_s32(int32_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vdupq_n_s64(int64_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai int64x2_t vdupq_n_s64(int64_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vdupq_n_s16(int16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int16x8_t vdupq_n_s16(int16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
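+/* vdup_n_*: duplicate a scalar into every lane of a 64-bit vector (the vdupq_n_*
+ * forms above produce 128-bit vectors). */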
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vdup_n_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai uint64x1_t vdup_n_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vdup_n_s8(int8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int8x8_t vdup_n_s8(int8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vdup_n_f32(float32_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai float32x2_t vdup_n_f32(float32_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vdup_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
+ __ret; \
+})
+#else
+#define vdup_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vdup_n_s32(int32_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai int32x2_t vdup_n_s32(int32_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vdup_n_s64(int64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai int64x1_t vdup_n_s64(int64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vdup_n_s16(int16_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int16x4_t vdup_n_s16(int16_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
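+/* veor/veorq: lane-wise bitwise exclusive OR of two vectors. */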
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 ^ __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 ^ __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
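+/* vext/vextq: concatenate the two operands (__p0 in the low lanes) and extract a
+ * vector starting at constant lane __p2, i.e. result[i] = (__p0:__p1)[__p2 + i]. */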
+#ifdef __LITTLE_ENDIAN__
+#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
+ __ret; \
+})
+#else
+#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
+ __ret; \
+})
+#else
+#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
+ __ret; \
+})
+#else
+#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
+ __ret; \
+})
+#else
+#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
+ __ret; \
+})
+#else
+#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
+ __ret; \
+})
+#else
+#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
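+/* vfma/vfmaq: fused multiply-add, __p0 + __p1 * __p2 with a single rounding. */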
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
+ return __ret;
+}
+#endif
+
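+/* vget_high_*: return the upper half (lanes n/2..n-1) of a 128-bit vector. */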
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#else
+__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1);
+ return __ret;
+}
+#else
+__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vget_high_s8(int8x16_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#else
+__ai int8x8_t vget_high_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vget_high_f32(float32x4_t __p0) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#else
+__ai float32x2_t vget_high_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vget_high_f16(float16x8_t __p0) {
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai float16x4_t vget_high_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vget_high_s32(int32x4_t __p0) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#else
+__ai int32x2_t vget_high_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vget_high_s64(int64x2_t __p0) {
+ int64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1);
+ return __ret;
+}
+#else
+__ai int64x1_t vget_high_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vget_high_s16(int16x8_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai int16x4_t vget_high_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
+ return __ret;
+}
+#endif
+
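+/* vget_lane/vgetq_lane: extract the scalar in lane __p1; __p1 must be a constant
+ * lane index. */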
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8_t __ret; \
+ __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16_t __ret; \
+ __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8_t __ret; \
+ __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32_t __ret; \
+ __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64_t __ret; \
+ __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16_t __ret; \
+ __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8_t __ret; \
+ __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32_t __ret; \
+ __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32_t __ret; \
+ __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64_t __ret; \
+ __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vget_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#else
+#define vget_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+ __ret; \
+})
+#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16_t __ret; \
+ __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+ __ret; \
+})
+#endif
+
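+/* vget_low_*: return the lower half (lanes 0..n/2-1) of a 128-bit vector. */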
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
+ return __ret;
+}
+#else
+__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0);
+ return __ret;
+}
+#else
+__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vget_low_s8(int8x16_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
+ return __ret;
+}
+#else
+__ai int8x8_t vget_low_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vget_low_f32(float32x4_t __p0) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
+ return __ret;
+}
+#else
+__ai float32x2_t vget_low_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float16x4_t vget_low_f16(float16x8_t __p0) {
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai float16x4_t vget_low_f16(float16x8_t __p0) {
+ float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ float16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vget_low_s32(int32x4_t __p0) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
+ return __ret;
+}
+#else
+__ai int32x2_t vget_low_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vget_low_s64(int64x2_t __p0) {
+ int64x1_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0);
+ return __ret;
+}
+#else
+__ai int64x1_t vget_low_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x1_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vget_low_s16(int16x8_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
+ return __ret;
+}
+#else
+__ai int16x4_t vget_low_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
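[Illustrative usage sketch -- not part of the generated header or of this patch. The vget_low_* wrappers above return the low 64-bit half of a 128-bit vector; on big-endian targets they normalize lane order before and after the shuffle so callers always see architectural lane numbering. The function and parameter names below are hypothetical.]

    #include <arm_neon.h>

    /* Load 8 int16 values, keep the low half, and extract architectural lane 2. */
    int16_t low_lane2(const int16_t *src) {
        int16x8_t q  = vld1q_s16(src);        /* load all 8 elements           */
        int16x4_t lo = vget_low_s16(q);       /* keep architectural lanes 0..3 */
        return vget_lane_s16(lo, 2);          /* extract lane 2 as a scalar    */
    }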
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
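[Illustrative usage sketch -- not part of the generated header or of this patch. The vhadd/vhsub wrappers above are the halving forms: each result lane is the per-lane sum (or difference) shifted right by one, computed in widened precision so it cannot overflow. The function name is hypothetical.]

    #include <arm_neon.h>

    /* Per-lane (truncating) average of two byte vectors, no intermediate overflow. */
    uint8x8_t average8(uint8x8_t a, uint8x8_t b) {
        return vhadd_u8(a, b);                /* lane i = (a[i] + b[i]) >> 1 */
    }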
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p8(__p0) __extension__ ({ \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
+ __ret; \
+})
+#else
+#define vld1_p8(__p0) __extension__ ({ \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_p16(__p0) __extension__ ({ \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
+ __ret; \
+})
+#else
+#define vld1_p16(__p0) __extension__ ({ \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p8(__p0) __extension__ ({ \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
+ __ret; \
+})
+#else
+#define vld1q_p8(__p0) __extension__ ({ \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_p16(__p0) __extension__ ({ \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
+ __ret; \
+})
+#else
+#define vld1q_p16(__p0) __extension__ ({ \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u8(__p0) __extension__ ({ \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
+ __ret; \
+})
+#else
+#define vld1q_u8(__p0) __extension__ ({ \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u32(__p0) __extension__ ({ \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
+ __ret; \
+})
+#else
+#define vld1q_u32(__p0) __extension__ ({ \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u64(__p0) __extension__ ({ \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
+ __ret; \
+})
+#else
+#define vld1q_u64(__p0) __extension__ ({ \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_u16(__p0) __extension__ ({ \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
+ __ret; \
+})
+#else
+#define vld1q_u16(__p0) __extension__ ({ \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s8(__p0) __extension__ ({ \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
+ __ret; \
+})
+#else
+#define vld1q_s8(__p0) __extension__ ({ \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f32(__p0) __extension__ ({ \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
+ __ret; \
+})
+#else
+#define vld1q_f32(__p0) __extension__ ({ \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_f16(__p0) __extension__ ({ \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
+ __ret; \
+})
+#else
+#define vld1q_f16(__p0) __extension__ ({ \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s32(__p0) __extension__ ({ \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
+ __ret; \
+})
+#else
+#define vld1q_s32(__p0) __extension__ ({ \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s64(__p0) __extension__ ({ \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
+ __ret; \
+})
+#else
+#define vld1q_s64(__p0) __extension__ ({ \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_s16(__p0) __extension__ ({ \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
+ __ret; \
+})
+#else
+#define vld1q_s16(__p0) __extension__ ({ \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u8(__p0) __extension__ ({ \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
+ __ret; \
+})
+#else
+#define vld1_u8(__p0) __extension__ ({ \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u32(__p0) __extension__ ({ \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
+ __ret; \
+})
+#else
+#define vld1_u32(__p0) __extension__ ({ \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u64(__p0) __extension__ ({ \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
+ __ret; \
+})
+#else
+#define vld1_u64(__p0) __extension__ ({ \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_u16(__p0) __extension__ ({ \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
+ __ret; \
+})
+#else
+#define vld1_u16(__p0) __extension__ ({ \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s8(__p0) __extension__ ({ \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
+ __ret; \
+})
+#else
+#define vld1_s8(__p0) __extension__ ({ \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f32(__p0) __extension__ ({ \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
+ __ret; \
+})
+#else
+#define vld1_f32(__p0) __extension__ ({ \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_f16(__p0) __extension__ ({ \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
+ __ret; \
+})
+#else
+#define vld1_f16(__p0) __extension__ ({ \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s32(__p0) __extension__ ({ \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
+ __ret; \
+})
+#else
+#define vld1_s32(__p0) __extension__ ({ \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s64(__p0) __extension__ ({ \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
+ __ret; \
+})
+#else
+#define vld1_s64(__p0) __extension__ ({ \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_s16(__p0) __extension__ ({ \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
+ __ret; \
+})
+#else
+#define vld1_s16(__p0) __extension__ ({ \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_p8(__p0) __extension__ ({ \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
+ __ret; \
+})
+#else
+#define vld1_dup_p8(__p0) __extension__ ({ \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_p16(__p0) __extension__ ({ \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
+ __ret; \
+})
+#else
+#define vld1_dup_p16(__p0) __extension__ ({ \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_p8(__p0) __extension__ ({ \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
+ __ret; \
+})
+#else
+#define vld1q_dup_p8(__p0) __extension__ ({ \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_p16(__p0) __extension__ ({ \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
+ __ret; \
+})
+#else
+#define vld1q_dup_p16(__p0) __extension__ ({ \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u8(__p0) __extension__ ({ \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
+ __ret; \
+})
+#else
+#define vld1q_dup_u8(__p0) __extension__ ({ \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u32(__p0) __extension__ ({ \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
+ __ret; \
+})
+#else
+#define vld1q_dup_u32(__p0) __extension__ ({ \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u64(__p0) __extension__ ({ \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
+ __ret; \
+})
+#else
+#define vld1q_dup_u64(__p0) __extension__ ({ \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_u16(__p0) __extension__ ({ \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
+ __ret; \
+})
+#else
+#define vld1q_dup_u16(__p0) __extension__ ({ \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s8(__p0) __extension__ ({ \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
+ __ret; \
+})
+#else
+#define vld1q_dup_s8(__p0) __extension__ ({ \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_f32(__p0) __extension__ ({ \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
+ __ret; \
+})
+#else
+#define vld1q_dup_f32(__p0) __extension__ ({ \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_f16(__p0) __extension__ ({ \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
+ __ret; \
+})
+#else
+#define vld1q_dup_f16(__p0) __extension__ ({ \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s32(__p0) __extension__ ({ \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
+ __ret; \
+})
+#else
+#define vld1q_dup_s32(__p0) __extension__ ({ \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s64(__p0) __extension__ ({ \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
+ __ret; \
+})
+#else
+#define vld1q_dup_s64(__p0) __extension__ ({ \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_dup_s16(__p0) __extension__ ({ \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
+ __ret; \
+})
+#else
+#define vld1q_dup_s16(__p0) __extension__ ({ \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u8(__p0) __extension__ ({ \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
+ __ret; \
+})
+#else
+#define vld1_dup_u8(__p0) __extension__ ({ \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u32(__p0) __extension__ ({ \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
+ __ret; \
+})
+#else
+#define vld1_dup_u32(__p0) __extension__ ({ \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u64(__p0) __extension__ ({ \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
+ __ret; \
+})
+#else
+#define vld1_dup_u64(__p0) __extension__ ({ \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_u16(__p0) __extension__ ({ \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
+ __ret; \
+})
+#else
+#define vld1_dup_u16(__p0) __extension__ ({ \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s8(__p0) __extension__ ({ \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
+ __ret; \
+})
+#else
+#define vld1_dup_s8(__p0) __extension__ ({ \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_f32(__p0) __extension__ ({ \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
+ __ret; \
+})
+#else
+#define vld1_dup_f32(__p0) __extension__ ({ \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_f16(__p0) __extension__ ({ \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
+ __ret; \
+})
+#else
+#define vld1_dup_f16(__p0) __extension__ ({ \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s32(__p0) __extension__ ({ \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
+ __ret; \
+})
+#else
+#define vld1_dup_s32(__p0) __extension__ ({ \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s64(__p0) __extension__ ({ \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
+ __ret; \
+})
+#else
+#define vld1_dup_s64(__p0) __extension__ ({ \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_dup_s16(__p0) __extension__ ({ \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
+ __ret; \
+})
+#else
+#define vld1_dup_s16(__p0) __extension__ ({ \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
+ __ret; \
+})
+#else
+#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
+ __ret; \
+})
+#else
+#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
+ __ret; \
+})
+#else
+#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
+ __ret; \
+})
+#else
+#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
+ __ret; \
+})
+#else
+#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
+ __ret; \
+})
+#else
+#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
+ __ret; \
+})
+#else
+#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
+ __ret; \
+})
+#else
+#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
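[Illustrative usage sketch -- not part of the generated header or of this patch. The preceding vld1/vld1_dup/vld1_lane wrappers load a full vector, broadcast one element to all lanes, or replace a single lane of an existing vector; the big-endian variants reverse into and out of architectural lane order around the builtin. The function and parameter names below are hypothetical.]

    #include <arm_neon.h>

    float32x2_t load_mix(const float *base, const float *one) {
        float32x2_t v = vld1_f32(base);       /* lanes 0..1 loaded from base[0..1] */
        float32x2_t s = vld1_dup_f32(one);    /* both lanes set to *one            */
        v = vld1_lane_f32(one, v, 1);         /* overwrite lane 1 of v with *one   */
        return vadd_f32(v, s);                /* combine the two results           */
    }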
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_p8(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld2_p8(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_p16(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld2_p16(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_p8(__p0) __extension__ ({ \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld2q_p8(__p0) __extension__ ({ \
+ poly8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_p16(__p0) __extension__ ({ \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld2q_p16(__p0) __extension__ ({ \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u8(__p0) __extension__ ({ \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld2q_u8(__p0) __extension__ ({ \
+ uint8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u32(__p0) __extension__ ({ \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld2q_u32(__p0) __extension__ ({ \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_u16(__p0) __extension__ ({ \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld2q_u16(__p0) __extension__ ({ \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s8(__p0) __extension__ ({ \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld2q_s8(__p0) __extension__ ({ \
+ int8x16x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_f32(__p0) __extension__ ({ \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld2q_f32(__p0) __extension__ ({ \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_f16(__p0) __extension__ ({ \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld2q_f16(__p0) __extension__ ({ \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s32(__p0) __extension__ ({ \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld2q_s32(__p0) __extension__ ({ \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_s16(__p0) __extension__ ({ \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld2q_s16(__p0) __extension__ ({ \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u8(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld2_u8(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u32(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld2_u32(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u64(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld2_u64(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_u16(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld2_u16(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s8(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld2_s8(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_f32(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld2_f32(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_f16(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld2_f16(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s32(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld2_s32(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s64(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld2_s64(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_s16(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld2_s16(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
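+/* The vld2/vld2q family above performs de-interleaving structure loads:
+ * vld2_u8(p) reads 16 consecutive bytes and returns a uint8x8x2_t whose
+ * val[0] holds the even-indexed elements and val[1] the odd-indexed ones.
+ * In the big-endian variants the lanes of each result vector are reversed
+ * with __builtin_shufflevector so that user-visible lane numbering matches
+ * the little-endian (memory-order) convention.
+ * A minimal usage sketch, assuming a caller that splits interleaved stereo
+ * samples into planar channels (the helper name is illustrative only):
+ *
+ *   void deinterleave_stereo_u8(const uint8_t *in, uint8_t *l, uint8_t *r) {
+ *     uint8x8x2_t lr = vld2_u8(in);  // lr.val[0] = in[0,2,4,...], lr.val[1] = in[1,3,5,...]
+ *     vst1_u8(l, lr.val[0]);
+ *     vst1_u8(r, lr.val[1]);
+ *   }
+ */
+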
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_p8(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld2_dup_p8(__p0) __extension__ ({ \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_p16(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld2_dup_p16(__p0) __extension__ ({ \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u8(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld2_dup_u8(__p0) __extension__ ({ \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u32(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld2_dup_u32(__p0) __extension__ ({ \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u64(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld2_dup_u64(__p0) __extension__ ({ \
+ uint64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_u16(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld2_dup_u16(__p0) __extension__ ({ \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s8(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld2_dup_s8(__p0) __extension__ ({ \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_f32(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld2_dup_f32(__p0) __extension__ ({ \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_f16(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld2_dup_f16(__p0) __extension__ ({ \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s32(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld2_dup_s32(__p0) __extension__ ({ \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s64(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld2_dup_s64(__p0) __extension__ ({ \
+ int64x1x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_dup_s16(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld2_dup_s16(__p0) __extension__ ({ \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
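+/* The vld2_dup family above loads a single two-element structure and
+ * broadcasts it: vld2_dup_f32(p) reads p[0] and p[1] and returns a
+ * float32x2x2_t with val[0] = {p[0], p[0]} and val[1] = {p[1], p[1]}.
+ * A minimal sketch, assuming a complex coefficient stored as {re, im}
+ * (the variable names are illustrative only):
+ *
+ *   const float coeff[2] = { 0.5f, -1.25f };
+ *   float32x2x2_t c = vld2_dup_f32(coeff);  // c.val[0] = {re, re}, c.val[1] = {im, im}
+ */
+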
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
+ __ret; \
+})
+#else
+#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ poly8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
+ __ret; \
+})
+#else
+#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ poly16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ poly16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
+ __ret; \
+})
+#else
+#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ poly16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
+ __ret; \
+})
+#else
+#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ uint32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ uint32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
+ __ret; \
+})
+#else
+#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ uint16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \
+ __ret; \
+})
+#else
+#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ float32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ float32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \
+ __ret; \
+})
+#else
+#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ float16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \
+ __ret; \
+})
+#else
+#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ int32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ int32x4x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \
+ __ret; \
+})
+#else
+#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ int16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8x2_t __ret; \
+ __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
+ __ret; \
+})
+#else
+#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ uint8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
+ __ret; \
+})
+#else
+#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ uint32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ uint32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
+ __ret; \
+})
+#else
+#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ uint16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ uint16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
+ __ret; \
+})
+#else
+#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ int8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \
+ __ret; \
+})
+#else
+#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ float32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ float32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \
+ __ret; \
+})
+#else
+#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ float16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ float16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \
+ __ret; \
+})
+#else
+#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ int32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ int32x2x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \
+ __ret; \
+})
+#else
+#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ int16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ int16x4x2_t __ret; \
+ __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
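+/* The vld2_lane/vld2q_lane family above loads one two-element structure
+ * into a single lane of an existing vector pair: the remaining lanes are
+ * copied unchanged from the second argument, and the lane index must be a
+ * compile-time constant. A minimal sketch, assuming a hypothetical helper
+ * that patches lane 0 of an accumulator pair from memory:
+ *
+ *   void load_pair_into_lane0(const uint16_t *p, uint16x4x2_t *v) {
+ *     *v = vld2_lane_u16(p, *v, 0);  // v->val[0][0] = p[0], v->val[1][0] = p[1]
+ *   }
+ */
+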
+#ifdef __LITTLE_ENDIAN__
+#define vld3_p8(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld3_p8(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_p16(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld3_p16(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_p8(__p0) __extension__ ({ \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld3q_p8(__p0) __extension__ ({ \
+ poly8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_p16(__p0) __extension__ ({ \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld3q_p16(__p0) __extension__ ({ \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_u8(__p0) __extension__ ({ \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld3q_u8(__p0) __extension__ ({ \
+ uint8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_u32(__p0) __extension__ ({ \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld3q_u32(__p0) __extension__ ({ \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_u16(__p0) __extension__ ({ \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld3q_u16(__p0) __extension__ ({ \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_s8(__p0) __extension__ ({ \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld3q_s8(__p0) __extension__ ({ \
+ int8x16x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_f32(__p0) __extension__ ({ \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld3q_f32(__p0) __extension__ ({ \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_f16(__p0) __extension__ ({ \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld3q_f16(__p0) __extension__ ({ \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_s32(__p0) __extension__ ({ \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld3q_s32(__p0) __extension__ ({ \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_s16(__p0) __extension__ ({ \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld3q_s16(__p0) __extension__ ({ \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_u8(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld3_u8(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_u32(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld3_u32(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_u64(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld3_u64(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_u16(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld3_u16(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_s8(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld3_s8(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_f32(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld3_f32(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_f16(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld3_f16(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_s32(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld3_s32(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_s64(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld3_s64(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_s16(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld3_s16(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
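+/* The vld3/vld3q family above de-interleaves three-element structures,
+ * which is the usual way to split packed RGB data into planar channels.
+ * A minimal sketch (the variable names are illustrative only):
+ *
+ *   uint8x16x3_t px = vld3q_u8(rgb);  // px.val[0] = R, px.val[1] = G, px.val[2] = B of 16 pixels
+ */
+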
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_p8(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld3_dup_p8(__p0) __extension__ ({ \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_p16(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld3_dup_p16(__p0) __extension__ ({ \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_u8(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld3_dup_u8(__p0) __extension__ ({ \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_u32(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld3_dup_u32(__p0) __extension__ ({ \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_u64(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld3_dup_u64(__p0) __extension__ ({ \
+ uint64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_u16(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld3_dup_u16(__p0) __extension__ ({ \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_s8(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld3_dup_s8(__p0) __extension__ ({ \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_f32(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld3_dup_f32(__p0) __extension__ ({ \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_f16(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld3_dup_f16(__p0) __extension__ ({ \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_s32(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld3_dup_s32(__p0) __extension__ ({ \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_s64(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld3_dup_s64(__p0) __extension__ ({ \
+ int64x1x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_dup_s16(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld3_dup_s16(__p0) __extension__ ({ \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
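+/* The vld3_dup family above loads one three-element structure and
+ * replicates each element across all lanes of the corresponding result
+ * vector, e.g. broadcasting a single RGB colour. A minimal sketch
+ * (the variable name is illustrative only):
+ *
+ *   uint8x8x3_t c = vld3_dup_u8(rgb);  // c.val[0] = {R,...,R}, c.val[1] = {G,...,G}, c.val[2] = {B,...,B}
+ */
+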
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
+ __ret; \
+})
+#else
+#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ poly8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
+ __ret; \
+})
+#else
+#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ poly16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ poly16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
+ __ret; \
+})
+#else
+#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ poly16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
+ __ret; \
+})
+#else
+#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ uint32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ uint32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
+ __ret; \
+})
+#else
+#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ uint16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
+ __ret; \
+})
+#else
+#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ float32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ float32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
+ __ret; \
+})
+#else
+#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ float16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
+ __ret; \
+})
+#else
+#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ int32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ int32x4x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
+ __ret; \
+})
+#else
+#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ int16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8x3_t __ret; \
+ __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
+ __ret; \
+})
+#else
+#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ uint8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
+ __ret; \
+})
+#else
+#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ uint32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ uint32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
+ __ret; \
+})
+#else
+#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ uint16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ uint16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
+ __ret; \
+})
+#else
+#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ int8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
+ __ret; \
+})
+#else
+#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ float32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ float32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
+ __ret; \
+})
+#else
+#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ float16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ float16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
+ __ret; \
+})
+#else
+#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ int32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ int32x2x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
+ __ret; \
+})
+#else
+#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ int16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ int16x4x3_t __ret; \
+ __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
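+/* Editorial note (illustrative; not part of the generated header): every
+ * intrinsic in this file is defined twice, and the big-endian variant merely
+ * reverses the lanes of each operand with __builtin_shufflevector, invokes the
+ * same polymorphic builtin, and reverses the result, so caller-visible lane
+ * numbering never changes. Judging from the codes used above, the trailing
+ * integer encodes the element type (s8=0, s16=1, s32=2, ..., +16 for the
+ * unsigned types, +32 for the quad forms, hence 33 for q_s16 and 48 for q_u8).
+ * A hypothetical use of the lane-wise loads, with the names data and v
+ * invented for illustration:
+ *
+ *   int16_t data[12];                      three-way interleaved source
+ *   int16x4x3_t v = vld3_s16(data);        de-interleaving load
+ *   v = vld3_lane_s16(data, v, 0);         refresh lane 0 of all three vectors
+ */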
+#ifdef __LITTLE_ENDIAN__
+#define vld4_p8(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld4_p8(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_p16(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld4_p16(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_p8(__p0) __extension__ ({ \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 36); \
+ __ret; \
+})
+#else
+#define vld4q_p8(__p0) __extension__ ({ \
+ poly8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 36); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_p16(__p0) __extension__ ({ \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 37); \
+ __ret; \
+})
+#else
+#define vld4q_p16(__p0) __extension__ ({ \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_u8(__p0) __extension__ ({ \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 48); \
+ __ret; \
+})
+#else
+#define vld4q_u8(__p0) __extension__ ({ \
+ uint8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 48); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_u32(__p0) __extension__ ({ \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 50); \
+ __ret; \
+})
+#else
+#define vld4q_u32(__p0) __extension__ ({ \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_u16(__p0) __extension__ ({ \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 49); \
+ __ret; \
+})
+#else
+#define vld4q_u16(__p0) __extension__ ({ \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_s8(__p0) __extension__ ({ \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 32); \
+ __ret; \
+})
+#else
+#define vld4q_s8(__p0) __extension__ ({ \
+ int8x16x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 32); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_f32(__p0) __extension__ ({ \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 41); \
+ __ret; \
+})
+#else
+#define vld4q_f32(__p0) __extension__ ({ \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_f16(__p0) __extension__ ({ \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 40); \
+ __ret; \
+})
+#else
+#define vld4q_f16(__p0) __extension__ ({ \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_s32(__p0) __extension__ ({ \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 34); \
+ __ret; \
+})
+#else
+#define vld4q_s32(__p0) __extension__ ({ \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_s16(__p0) __extension__ ({ \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 33); \
+ __ret; \
+})
+#else
+#define vld4q_s16(__p0) __extension__ ({ \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_v(&__ret, __p0, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_u8(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld4_u8(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_u32(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld4_u32(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_u64(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld4_u64(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_u16(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld4_u16(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_s8(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld4_s8(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_f32(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld4_f32(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_f16(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld4_f16(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_s32(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld4_s32(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_s64(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld4_s64(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_s16(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld4_s16(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
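+/* Editorial note (illustrative; not part of the generated header): the vld4
+ * family above performs a de-interleaving structure load, e.g. for packed
+ * RGBA pixels. Hypothetical example, names rgba and px invented:
+ *
+ *   uint8_t rgba[64];                      sixteen interleaved RGBA pixels
+ *   uint8x16x4_t px = vld4q_u8(rgba);      px.val[0]=R, val[1]=G, val[2]=B, val[3]=A
+ */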
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_p8(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
+ __ret; \
+})
+#else
+#define vld4_dup_p8(__p0) __extension__ ({ \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_p16(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
+ __ret; \
+})
+#else
+#define vld4_dup_p16(__p0) __extension__ ({ \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_u8(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
+ __ret; \
+})
+#else
+#define vld4_dup_u8(__p0) __extension__ ({ \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_u32(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
+ __ret; \
+})
+#else
+#define vld4_dup_u32(__p0) __extension__ ({ \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_u64(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#else
+#define vld4_dup_u64(__p0) __extension__ ({ \
+ uint64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_u16(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
+ __ret; \
+})
+#else
+#define vld4_dup_u16(__p0) __extension__ ({ \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_s8(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
+ __ret; \
+})
+#else
+#define vld4_dup_s8(__p0) __extension__ ({ \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_f32(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
+ __ret; \
+})
+#else
+#define vld4_dup_f32(__p0) __extension__ ({ \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_f16(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
+ __ret; \
+})
+#else
+#define vld4_dup_f16(__p0) __extension__ ({ \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_s32(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
+ __ret; \
+})
+#else
+#define vld4_dup_s32(__p0) __extension__ ({ \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_s64(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#else
+#define vld4_dup_s64(__p0) __extension__ ({ \
+ int64x1x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_dup_s16(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
+ __ret; \
+})
+#else
+#define vld4_dup_s16(__p0) __extension__ ({ \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
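+/* Editorial note (illustrative; not part of the generated header): the
+ * vld4_dup forms above read one four-element structure and broadcast each
+ * element across every lane of its vector. Hypothetical example, names coeffs
+ * and c invented:
+ *
+ *   float coeffs[4];
+ *   float32x2x4_t c = vld4_dup_f32(coeffs);   both lanes of c.val[k] equal coeffs[k]
+ */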
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
+ __ret; \
+})
+#else
+#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ poly8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
+ __ret; \
+})
+#else
+#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ poly16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ poly16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
+ __ret; \
+})
+#else
+#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ poly16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
+ __ret; \
+})
+#else
+#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ uint32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ uint32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
+ __ret; \
+})
+#else
+#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ uint16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
+ __ret; \
+})
+#else
+#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ float32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ float32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
+ __ret; \
+})
+#else
+#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ float16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ float16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
+ __ret; \
+})
+#else
+#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ int32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ int32x4x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
+ __ret; \
+})
+#else
+#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ int16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8x4_t __ret; \
+ __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
+ __ret; \
+})
+#else
+#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ uint8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
+ __ret; \
+})
+#else
+#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ uint32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ uint32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
+ __ret; \
+})
+#else
+#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ uint16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ uint16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
+ __ret; \
+})
+#else
+#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ int8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
+ __ret; \
+})
+#else
+#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ float32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ float32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
+ __ret; \
+})
+#else
+#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ float16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ float16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
+ __ret; \
+})
+#else
+#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ int32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ int32x2x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
+ __ret; \
+})
+#else
+#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ int16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ int16x4x4_t __ret; \
+ __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
+ \
+ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
+ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
+ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
+ __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
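+/* Editorial note (illustrative; not part of the generated header): the
+ * lane-wise forms above take an existing multi-vector value, replace the
+ * selected lane of each of its vectors from memory, and return the updated
+ * tuple; the big-endian variants reverse lanes only so that the caller's lane
+ * index keeps its architectural meaning. Hypothetical example, names src and
+ * quad invented:
+ *
+ *   uint16_t src[16];                         four-way interleaved source
+ *   uint16x4x4_t quad = vld4_u16(src);        defined earlier in this header
+ *   quad = vld4_lane_u16(src, quad, 3);       reload lane 3 of each vector
+ */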
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __ret;
+ __ret = __p0 + __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 + __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x8_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x2_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __ret;
+ __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint32x2_t __ret;
+ __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint16x4_t __ret;
+ __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __ret;
+ __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int32x2_t __ret;
+ __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int16x4_t __ret;
+ __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __ret;
+ __ret = __p0 - __p1 * __p2;
+ return __ret;
+}
+#else
+__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 - __rev1 * __rev2;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x8_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x2_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __s2 = __p2; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __s2 = __p2; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __s2 = __p2; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __ret; \
+ __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
+ __ret; \
+})
+#else
+#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint32x4_t __ret;
+ __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint16x8_t __ret;
+ __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __ret;
+ __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+ int32x4_t __ret;
+ __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+ int16x8_t __ret;
+ __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint32x2_t __ret;
+ __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint16x4_t __ret;
+ __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __ret;
+ __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int32x2_t __ret;
+ __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
+ return __ret;
+}
+#else
+__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int16x4_t __ret;
+ __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
+ return __ret;
+}
+#else
+__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
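+/* vmov_n_* / vmovq_n_*: duplicate a scalar into every lane of a 64-bit (vmov_n)
+ * or 128-bit (vmovq_n) vector. */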
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
+ poly16x4_t __ret;
+ __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmovq_n_s8(int8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int8x16_t vmovq_n_s8(int8_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmovq_n_f32(float32_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai float32x4_t vmovq_n_f32(float32_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmovq_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
+ __ret; \
+})
+#else
+#define vmovq_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x8_t __ret; \
+ __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovq_n_s32(int32_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int32x4_t vmovq_n_s32(int32_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovq_n_s64(int64_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai int64x2_t vmovq_n_s64(int64_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovq_n_s16(int16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int16x8_t vmovq_n_s16(int16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vmov_n_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai uint64x1_t vmov_n_u64(uint64_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmov_n_s8(int8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int8x8_t vmov_n_s8(int8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmov_n_f32(float32_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai float32x2_t vmov_n_f32(float32_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmov_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
+ __ret; \
+})
+#else
+#define vmov_n_f16(__p0) __extension__ ({ \
+ float16_t __s0 = __p0; \
+ float16x4_t __ret; \
+ __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmov_n_s32(int32_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) {__p0, __p0};
+ return __ret;
+}
+#else
+__ai int32x2_t vmov_n_s32(int32_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) {__p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vmov_n_s64(int64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) {__p0};
+ return __ret;
+}
+#else
+__ai int64x1_t vmov_n_s64(int64_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) {__p0};
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmov_n_s16(int16_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
+ return __ret;
+}
+#else
+__ai int16x4_t vmov_n_s16(int16_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
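+/* vmovl_*: lengthening move -- widen each element of a 64-bit vector to twice its
+ * width, giving a 128-bit result.  The __noswap_* forms skip the lane reversal and
+ * are called from other big-endian wrappers whose operands are already reversed. */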
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmovl_s8(int8x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vmovl_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmovl_s32(int32x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vmovl_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmovl_s16(int16x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vmovl_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
+ return __ret;
+}
+#endif
+
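+/* vmovn_*: narrowing move -- truncate each element of a 128-bit vector to half its
+ * width, giving a 64-bit result. */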
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmovn_s32(int32x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vmovn_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmovn_s64(int64x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vmovn_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmovn_s16(int16x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vmovn_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
+ return __ret;
+}
+#endif
+
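+/* vmulq_* / vmul_*: element-wise multiply of two vectors of the same type. */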
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 * __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 * __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
+ return __ret;
+}
+#else
+__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
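+/* vmulq_lane_* / vmul_lane_*: multiply every element of the first vector by one
+ * lane of the second vector, selected by the constant lane index. */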
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
+ __ret; \
+})
+#else
+#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
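+/* vmulq_n_* / vmul_n_*: multiply every element of a vector by a broadcast scalar. */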
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
+ float32x4_t __ret;
+ __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 * (uint32x2_t) {__p1, __p1};
+ return __ret;
+}
+#else
+__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 * (uint32x2_t) {__p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
+ float32x2_t __ret;
+ __ret = __p0 * (float32x2_t) {__p1, __p1};
+ return __ret;
+}
+#else
+__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 * (float32x2_t) {__p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 * (int32x2_t) {__p1, __p1};
+ return __ret;
+}
+#else
+__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 * (int32x2_t) {__p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
+ return __ret;
+}
+#else
+__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
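+/* vmull_*: widening multiply -- multiply two 64-bit vectors and produce a 128-bit
+ * result whose elements are twice the input width (a polynomial multiply for poly8).
+ * Illustrative use, with hypothetical int16x4_t values a and b:
+ *   int32x4_t prod = vmull_s16(a, b);   // 16x16 -> 32-bit products
+ */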
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
+ return __ret;
+}
+#else
+__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
+ poly16x8_t __ret;
+ __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
+ return __ret;
+}
+#endif
+
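+/* vmull_lane_*: widening multiply by a selected lane of the second operand. */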
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
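+/* vmull_n_*: widening multiply by a broadcast scalar. */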
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#endif
+
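+/* vmvn_* / vmvnq_*: bitwise NOT of every element. */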
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vmvn_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int8x8_t vmvn_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vmvn_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int32x2_t vmvn_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vmvn_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = ~__p0;
+ return __ret;
+}
+#else
+__ai int16x4_t vmvn_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = ~__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
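+/* vneg_*: lane-wise negation (__ret = -__p0) for signed integer and
+ * floating-point vectors. */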
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vnegq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int8x16_t vnegq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vnegq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai float32x4_t vnegq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vnegq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int32x4_t vnegq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vnegq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int16x8_t vnegq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vneg_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int8x8_t vneg_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vneg_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai float32x2_t vneg_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vneg_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int32x2_t vneg_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vneg_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = -__p0;
+ return __ret;
+}
+#else
+__ai int16x4_t vneg_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = -__rev0;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
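+/* vorn_*: bitwise OR-NOT, i.e. __p0 | ~__p1 in every lane.  The 64x1 variants
+ * are identical in both endianness branches because a single-lane vector needs
+ * no lane reversal. */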
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 | ~__p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 | ~__rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
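+/* vorr_*: lane-wise bitwise OR (__p0 | __p1). */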
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 | __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 | __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
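+/* vpadal_*: pairwise add and accumulate long -- adjacent lane pairs of __p1 are
+ * added into a widened element and accumulated into the matching lane of __p0
+ * (e.g. uint8x16_t pairs accumulate into a uint16x8_t).  The trailing integer
+ * passed to the __builtin_neon_* calls encodes the concrete vector type. */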
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
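+/* vpadd_*: pairwise add -- adjacent lanes of __p0 and of __p1 are summed, with
+ * the sums from __p0 filling the low half of the result and those from __p1 the
+ * high half. */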
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
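+/* vpaddl_*: pairwise add long -- adjacent lane pairs of __p0 are added into
+ * elements twice as wide; unlike vpadal there is no accumulator operand. */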
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
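+/* vpmax_*: pairwise maximum -- the larger value of each adjacent lane pair,
+ * taken across __p0 and __p1. */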
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
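+/* vpmin_*: pairwise minimum -- the smaller value of each adjacent lane pair,
+ * taken across __p0 and __p1. */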
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
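+/* vqabs_*: saturating absolute value; the most negative representable value
+ * saturates to the type's maximum instead of wrapping. */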
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqabs_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqabs_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqabs_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqabs_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqabs_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqabs_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
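+/* vqadd_*: saturating addition; sums are clamped to the range of the element
+ * type.  The __noswap_ helpers defined in the big-endian branches appear to be
+ * intended for callers that have already reversed their operands. */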
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
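+/* vqdmlal_*: signed saturating doubling multiply-accumulate long.  The two
+ * narrow operands are multiplied, the product is doubled and widened, and the
+ * result is added to the wide accumulator with saturation.  The _lane_ macros
+ * below use one selected lane of the last operand; the _n_ variants broadcast
+ * a scalar to every lane. */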
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ return __ret;
+}
+#endif
+
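+/* vqdmlsl_*: signed saturating doubling multiply-subtract long.  Same shape as
+ * vqdmlal_*, but the doubled, widened product is subtracted from the wide
+ * accumulator with saturation. */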
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __ret; \
+ __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __s2 = __p2; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __ret; \
+ __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
+ __ret; \
+})
+#else
+#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __s2 = __p2; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+ return __ret;
+}
+#endif
+
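+/* vqdmulh_* / vqdmulhq_*: signed saturating doubling multiply returning the
+ * high half of the doubled product.  The _lane_ macros and _n_ variants that
+ * follow multiply by a single selected lane or by a broadcast scalar. */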
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
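+/* vqdmull_*: signed saturating doubling multiply long.  Multiplies the narrow
+ * operands, doubles the product, and returns the widened result with
+ * saturation. */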
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#endif
+
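+/* vqmovn_*: saturating narrow.  Each element of the wide source is narrowed to
+ * half its width, saturating values that do not fit. */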
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
+ return __ret;
+}
+#endif
+
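+/* vqmovun_*: saturating narrow with unsigned result.  Signed wide elements are
+ * narrowed to unsigned elements of half the width; negative inputs saturate to
+ * zero. */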
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
+ return __ret;
+}
+#endif
+
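+/* vqneg_* / vqnegq_*: saturating negation.  The most negative representable
+ * value saturates to the most positive instead of wrapping. */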
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqneg_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqneg_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqneg_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqneg_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqneg_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqneg_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
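+/* vqrdmulh_* / vqrdmulhq_*: signed saturating rounding doubling multiply
+ * returning the high half, i.e. vqdmulh with rounding of the result. */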
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x4_t __ret; \
+ __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
+ __ret; \
+})
+#else
+#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
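+/* vqrshl_* / vqrshlq_*: saturating rounding shift left.  Each element is
+ * shifted by the corresponding signed element of the second operand; negative
+ * shift amounts shift right, with rounding and saturation. */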
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
+ __ret; \
+})
+#else
+#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
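+/* vrecps/vrecpsq: Newton-Raphson reciprocal step, computing (2 - a*b) per lane;
+ * combined with vrecpe to iteratively refine 1/x. */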
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
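+/* vrev16/vrev16q: reverse the byte order within each 16-bit halfword of the vector. */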
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ return __ret;
+}
+#else
+__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ return __ret;
+}
+#else
+__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrev16_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai int8x8_t vrev16_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
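+/* vrev32/vrev32q: reverse the order of 8- or 16-bit elements within each 32-bit word. */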
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ return __ret;
+}
+#else
+__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ return __ret;
+}
+#else
+__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
+ return __ret;
+}
+#else
+__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrev32_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai int8x8_t vrev32_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrev32_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai int16x4_t vrev32_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
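+/* vrev64/vrev64q: reverse the order of 8-, 16-, or 32-bit elements within each 64-bit doubleword. */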
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ poly16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ return __ret;
+}
+#else
+__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ return __ret;
+}
+#else
+__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
+ return __ret;
+}
+#else
+__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
+ return __ret;
+}
+#else
+__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrev64_s8(int8x8_t __p0) {
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vrev64_s8(int8x8_t __p0) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrev64_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
+ return __ret;
+}
+#else
+__ai float32x2_t vrev64_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vrev64_s32(int32x2_t __p0) {
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
+ return __ret;
+}
+#else
+__ai int32x2_t vrev64_s32(int32x2_t __p0) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrev64_s16(int16x4_t __p0) {
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ return __ret;
+}
+#else
+__ai int16x4_t vrev64_s16(int16x4_t __p0) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
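+/* vrhadd/vrhaddq: rounding halving add, (a + b + 1) >> 1 per lane, computed without
+ * intermediate overflow. */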
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
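+/* vrshl/vrshlq: rounding shift left by a signed per-lane shift count; negative
+ * counts produce a rounding right shift. */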
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
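+/* vrshr_n/vrshrq_n: rounding shift right by an immediate; implemented as macros
+ * because the shift amount must be a compile-time constant. */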
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
+ __ret; \
+})
+#else
+#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
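+/* vrshrn_n: rounding shift right by an immediate, then narrow each lane to half width. */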
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#endif
+
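+/* vrsqrte/vrsqrteq: per-lane reciprocal square root estimate (u32 and f32 only). */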
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
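+/* vrsqrts/vrsqrtsq: Newton-Raphson reciprocal square root step, (3 - a*b) / 2 per lane;
+ * combined with vrsqrte to refine 1/sqrt(x). */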
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
+ return __ret;
+}
+#else
+__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
+ return __ret;
+}
+#else
+__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
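+/* vrsra_n/vrsraq_n: rounding shift right by an immediate and accumulate into the first operand. */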
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __ret; \
+ __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#define __noswap_vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32_t __s0 = __p0; \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __ret; \
+ __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#define __noswap_vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#else
+#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
+ return __ret;
+}
+#else
+__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
+ return __ret;
+}
+#else
+__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
+ return __ret;
+}
+#else
+__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
+ return __ret;
+}
+#else
+__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
+ return __ret;
+}
+#else
+__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
+ return __ret;
+}
+#else
+__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
+ return __ret;
+}
+#else
+__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
+ return __ret;
+}
+#else
+__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#else
+__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#else
+__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
+ __ret; \
+})
+#else
+#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vshl_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vshl_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vshl_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vshl_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vshl_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vshl_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vshl_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshl_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vshl_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vshll_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vshll_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vshll_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vshll_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vshll_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshll_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vshll_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
+ __ret; \
+})
+#endif
+
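vshll_n_* widens each lane to twice its width and then shifts it left by the immediate; the extra __noswap_ forms appear to exist for internal use by other macros whose operands have already been lane-reversed. A small sketch, assuming a NEON-enabled build (the function name is illustrative):

#include <arm_neon.h>

/* Widen 8-bit samples to 16 bits with 4 bits of headroom:
   each lane value x becomes (uint16_t)x << 4. */
uint16x8_t widen_and_scale(uint8x8_t samples) {
  return vshll_n_u8(samples, 4);
}
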
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
+ __ret; \
+})
+#else
+#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
+ __ret; \
+})
+#else
+#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
+ __ret; \
+})
+#else
+#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
+ __ret; \
+})
+#else
+#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
+ __ret; \
+})
+#else
+#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
+ __ret; \
+})
+#else
+#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
+ __ret; \
+})
+#else
+#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
+ __ret; \
+})
+#else
+#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vshr_n_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vshr_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#else
+#define vshr_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vshr_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vshr_n_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vshr_n_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#else
+#define vshr_n_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshr_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vshr_n_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
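vshrq_n_*/vshr_n_* shift every lane right by the immediate: arithmetic for the signed types, logical for the unsigned ones. The single-lane 64-bit forms need no lane reversal, which is why their little- and big-endian bodies are identical. A sketch, assuming a NEON-enabled build (the function name is illustrative):

#include <arm_neon.h>

/* Divide each signed 32-bit lane by 4; the shift is arithmetic,
   so negative lanes round toward minus infinity. */
int32x4_t divide_by_4(int32x4_t v) {
  return vshrq_n_s32(v, 2);
}
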
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#else
+#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#else
+#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#else
+#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#else
+#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#else
+#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#else
+#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
+ __ret; \
+})
+#endif
+
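vshrn_n_* shifts each lane right by the immediate and then narrows the result to half the element width, e.g. uint32x4_t down to uint16x4_t. A common use is bringing a widened intermediate back to its original element size; a sketch, assuming a NEON-enabled build (the function name is illustrative):

#include <arm_neon.h>

/* Take the high half of four 32-bit fixed-point products:
   each uint32_t lane x becomes (uint16_t)(x >> 16). */
uint16x4_t take_high_halves(uint32x4_t products) {
  return vshrn_n_u32(products, 16);
}
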
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
+ __ret; \
+})
+#else
+#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
+ __ret; \
+})
+#else
+#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
+ __ret; \
+})
+#else
+#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
+ __ret; \
+})
+#else
+#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
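vsli_n_*/vsliq_n_* implement "shift left and insert": each lane of the second operand is shifted left by the immediate and written into the corresponding lane of the first operand, while the low n bits of the destination lane are preserved. This is handy for packing bit fields; a sketch, assuming a NEON-enabled build (the field layout and function name are illustrative):

#include <arm_neon.h>

/* Pack two 4-bit fields per byte, assuming each 'hi' lane fits in
   4 bits: result = ((hi & 0xF) << 4) | (lo & 0xF). */
uint8x8_t pack_nibbles(uint8x8_t lo, uint8x8_t hi) {
  return vsli_n_u8(lo, hi, 4);
}
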
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
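vsra_n_*/vsraq_n_* shift each lane of the second operand right by the immediate and add the result to the first operand ("shift right and accumulate"). A sketch, assuming a NEON-enabled build (the function name is illustrative):

#include <arm_neon.h>

/* acc += (x >> 3) per 16-bit lane, accumulating a 1/8-weighted
   contribution in a single instruction. */
uint16x8_t accumulate_eighth(uint16x8_t acc, uint16x8_t x) {
  return vsraq_n_u16(acc, x, 3);
}
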
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
+ __ret; \
+})
+#else
+#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s0 = __p0; \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x8_t __ret; \
+ __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
+ __ret; \
+})
+#else
+#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s0 = __p0; \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ poly16x4_t __ret; \
+ __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
+ __ret; \
+})
+#else
+#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s0 = __p0; \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly8x16_t __ret; \
+ __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
+ __ret; \
+})
+#else
+#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s0 = __p0; \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ poly16x8_t __ret; \
+ __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
+ __ret; \
+})
+#else
+#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s0 = __p0; \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x16_t __ret; \
+ __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
+ __ret; \
+})
+#else
+#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s0 = __p0; \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint32x4_t __ret; \
+ __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
+ __ret; \
+})
+#else
+#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s0 = __p0; \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint64x2_t __ret; \
+ __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
+ __ret; \
+})
+#else
+#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s0 = __p0; \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint16x8_t __ret; \
+ __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
+ __ret; \
+})
+#else
+#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s0 = __p0; \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x16_t __ret; \
+ __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
+ __ret; \
+})
+#else
+#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s0 = __p0; \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int32x4_t __ret; \
+ __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
+ __ret; \
+})
+#else
+#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s0 = __p0; \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int64x2_t __ret; \
+ __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
+ __ret; \
+})
+#else
+#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s0 = __p0; \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int16x8_t __ret; \
+ __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
+ __ret; \
+})
+#else
+#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s0 = __p0; \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ uint8x8_t __ret; \
+ __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
+ __ret; \
+})
+#else
+#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s0 = __p0; \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ uint32x2_t __ret; \
+ __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#else
+#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s0 = __p0; \
+ uint64x1_t __s1 = __p1; \
+ uint64x1_t __ret; \
+ __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
+ __ret; \
+})
+#else
+#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s0 = __p0; \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ uint16x4_t __ret; \
+ __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
+ __ret; \
+})
+#else
+#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s0 = __p0; \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ int8x8_t __ret; \
+ __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
+ __ret; \
+})
+#else
+#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s0 = __p0; \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ int32x2_t __ret; \
+ __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#else
+#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s0 = __p0; \
+ int64x1_t __s1 = __p1; \
+ int64x1_t __ret; \
+ __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
+ __ret; \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
+ __ret; \
+})
+#else
+#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s0 = __p0; \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ int16x4_t __ret; \
+ __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
+ __ret; \
+})
+#endif
+
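+/* vst1 family: store a single vector to memory.  On big-endian targets the
+ * source vector is lane-reversed with __builtin_shufflevector before the
+ * builtin call, so the __builtin_neon_* builtins can work with a single
+ * (little-endian) lane numbering.  Typical use, for illustration only:
+ *   float32_t buf[4];
+ *   vst1q_f32(buf, vdupq_n_f32(1.0f));  // contiguous four-float store
+ */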
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
+})
+#else
+#define vst1_p8(__p0, __p1) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
+})
+#else
+#define vst1_p16(__p0, __p1) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
+})
+#else
+#define vst1q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
+})
+#else
+#define vst1q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
+})
+#else
+#define vst1q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
+})
+#else
+#define vst1q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
+})
+#else
+#define vst1q_u64(__p0, __p1) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
+})
+#else
+#define vst1q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
+})
+#else
+#define vst1q_s8(__p0, __p1) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
+})
+#else
+#define vst1q_f32(__p0, __p1) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_f16(__p0, __p1) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
+})
+#else
+#define vst1q_f16(__p0, __p1) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
+})
+#else
+#define vst1q_s32(__p0, __p1) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
+})
+#else
+#define vst1q_s64(__p0, __p1) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
+})
+#else
+#define vst1q_s16(__p0, __p1) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
+})
+#else
+#define vst1_u8(__p0, __p1) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
+})
+#else
+#define vst1_u32(__p0, __p1) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
+})
+#else
+#define vst1_u64(__p0, __p1) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
+})
+#else
+#define vst1_u16(__p0, __p1) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
+})
+#else
+#define vst1_s8(__p0, __p1) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
+})
+#else
+#define vst1_f32(__p0, __p1) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_f16(__p0, __p1) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
+})
+#else
+#define vst1_f16(__p0, __p1) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
+})
+#else
+#define vst1_s32(__p0, __p1) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
+})
+#else
+#define vst1_s64(__p0, __p1) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
+})
+#else
+#define vst1_s16(__p0, __p1) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
+})
+#endif
+
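+/* vst1_lane family: store one lane of a vector to memory.  The big-endian
+ * variants reverse the source lanes before the builtin call, following the
+ * same convention as the full-vector stores above. */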
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
+})
+#else
+#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8_t __s1 = __p1; \
+ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
+})
+#else
+#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4_t __s1 = __p1; \
+ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
+})
+#else
+#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x16_t __s1 = __p1; \
+ poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
+})
+#else
+#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8_t __s1 = __p1; \
+ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
+})
+#else
+#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x16_t __s1 = __p1; \
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
+})
+#else
+#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4_t __s1 = __p1; \
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
+})
+#else
+#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x2_t __s1 = __p1; \
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
+})
+#else
+#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8_t __s1 = __p1; \
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
+})
+#else
+#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x16_t __s1 = __p1; \
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
+})
+#else
+#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4_t __s1 = __p1; \
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
+})
+#else
+#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8_t __s1 = __p1; \
+ float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
+})
+#else
+#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4_t __s1 = __p1; \
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
+})
+#else
+#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x2_t __s1 = __p1; \
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
+})
+#else
+#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8_t __s1 = __p1; \
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
+})
+#else
+#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8_t __s1 = __p1; \
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
+})
+#else
+#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2_t __s1 = __p1; \
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
+})
+#else
+#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
+ uint64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
+})
+#else
+#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4_t __s1 = __p1; \
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
+})
+#else
+#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8_t __s1 = __p1; \
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
+})
+#else
+#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2_t __s1 = __p1; \
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
+})
+#else
+#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4_t __s1 = __p1; \
+ float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
+})
+#else
+#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2_t __s1 = __p1; \
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
+})
+#else
+#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
+ int64x1_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
+})
+#else
+#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4_t __s1 = __p1; \
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
+ __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
+})
+#endif
+
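+/* vst2 family: two-vector interleaved stores.  The big-endian variants
+ * lane-reverse both val[0] and val[1] of the x2 struct; the 64x1 variants
+ * have only one lane, so their two branches are identical.  Typical use,
+ * for illustration only (src and dst are placeholder pointers):
+ *   float32x4x2_t pair = vld2q_f32(src);   // de-interleaving load
+ *   vst2q_f32(dst, pair);                  // re-interleaving store
+ */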
+#ifdef __LITTLE_ENDIAN__
+#define vst2_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
+})
+#else
+#define vst2_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ poly8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
+})
+#else
+#define vst2_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ poly16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
+})
+#else
+#define vst2q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x2_t __s1 = __p1; \
+ poly8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
+})
+#else
+#define vst2q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ poly16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
+})
+#else
+#define vst2q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x2_t __s1 = __p1; \
+ uint8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
+})
+#else
+#define vst2q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ uint32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
+})
+#else
+#define vst2q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ uint16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
+})
+#else
+#define vst2q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x2_t __s1 = __p1; \
+ int8x16x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \
+})
+#else
+#define vst2q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ float32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \
+})
+#else
+#define vst2q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ float16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \
+})
+#else
+#define vst2q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ int32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \
+})
+#else
+#define vst2q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ int16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
+})
+#else
+#define vst2_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ uint8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
+})
+#else
+#define vst2_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ uint32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
+})
+#else
+#define vst2_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
+})
+#else
+#define vst2_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ uint16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s8(__p0, __p1) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
+})
+#else
+#define vst2_s8(__p0, __p1) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ int8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_f32(__p0, __p1) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \
+})
+#else
+#define vst2_f32(__p0, __p1) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ float32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_f16(__p0, __p1) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \
+})
+#else
+#define vst2_f16(__p0, __p1) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ float16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s32(__p0, __p1) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \
+})
+#else
+#define vst2_s32(__p0, __p1) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ int32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s64(__p0, __p1) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+})
+#else
+#define vst2_s64(__p0, __p1) __extension__ ({ \
+ int64x1x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_s16(__p0, __p1) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \
+})
+#else
+#define vst2_s16(__p0, __p1) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ int16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
+})
+#endif
+
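+/* vst2_lane family: store one lane from each of the two vectors, with the
+ * same big-endian lane-reversal convention as above. */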
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
+})
+#else
+#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x2_t __s1 = __p1; \
+ poly8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
+})
+#else
+#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x2_t __s1 = __p1; \
+ poly16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
+})
+#else
+#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x2_t __s1 = __p1; \
+ poly16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
+})
+#else
+#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x2_t __s1 = __p1; \
+ uint32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
+})
+#else
+#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x2_t __s1 = __p1; \
+ uint16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \
+})
+#else
+#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x2_t __s1 = __p1; \
+ float32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \
+})
+#else
+#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x2_t __s1 = __p1; \
+ float16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \
+})
+#else
+#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x2_t __s1 = __p1; \
+ int32x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \
+})
+#else
+#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x2_t __s1 = __p1; \
+ int16x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
+})
+#else
+#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x2_t __s1 = __p1; \
+ uint8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
+})
+#else
+#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x2_t __s1 = __p1; \
+ uint32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
+})
+#else
+#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x2_t __s1 = __p1; \
+ uint16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
+})
+#else
+#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x2_t __s1 = __p1; \
+ int8x8x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \
+})
+#else
+#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x2_t __s1 = __p1; \
+ float32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \
+})
+#else
+#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x2_t __s1 = __p1; \
+ float16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \
+})
+#else
+#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x2_t __s1 = __p1; \
+ int32x2x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \
+})
+#else
+#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x2_t __s1 = __p1; \
+ int16x4x2_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
+})
+#endif
+
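+/* vst3 family: three-vector interleaved stores, extending the vst2 pattern
+ * with a third val[2] element. */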
+#ifdef __LITTLE_ENDIAN__
+#define vst3_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
+})
+#else
+#define vst3_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ poly8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
+})
+#else
+#define vst3_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ poly16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
+})
+#else
+#define vst3q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x3_t __s1 = __p1; \
+ poly8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
+})
+#else
+#define vst3q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ poly16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
+})
+#else
+#define vst3q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x3_t __s1 = __p1; \
+ uint8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
+})
+#else
+#define vst3q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ uint32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
+})
+#else
+#define vst3q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ uint16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
+})
+#else
+#define vst3q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x3_t __s1 = __p1; \
+ int8x16x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
+})
+#else
+#define vst3q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ float32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
+})
+#else
+#define vst3q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ float16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
+})
+#else
+#define vst3q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ int32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
+})
+#else
+#define vst3q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ int16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
+})
+#else
+#define vst3_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ uint8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
+})
+#else
+#define vst3_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ uint32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
+})
+#else
+#define vst3_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
+})
+#else
+#define vst3_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ uint16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s8(__p0, __p1) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
+})
+#else
+#define vst3_s8(__p0, __p1) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ int8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_f32(__p0, __p1) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
+})
+#else
+#define vst3_f32(__p0, __p1) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ float32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_f16(__p0, __p1) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
+})
+#else
+#define vst3_f16(__p0, __p1) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ float16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s32(__p0, __p1) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
+})
+#else
+#define vst3_s32(__p0, __p1) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ int32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s64(__p0, __p1) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+})
+#else
+#define vst3_s64(__p0, __p1) __extension__ ({ \
+ int64x1x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_s16(__p0, __p1) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
+})
+#else
+#define vst3_s16(__p0, __p1) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ int16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
+})
+#endif
+
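+/* vst3_lane_<type> / vst3q_lane_<type>: store lane __p2 of each of the
+ * three vectors in __p1 to __p0. Big-endian variants reverse the lanes of
+ * each source vector before calling the builtin. */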
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
+})
+#else
+#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x3_t __s1 = __p1; \
+ poly8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
+})
+#else
+#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x3_t __s1 = __p1; \
+ poly16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
+})
+#else
+#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x3_t __s1 = __p1; \
+ poly16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
+})
+#else
+#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x3_t __s1 = __p1; \
+ uint32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
+})
+#else
+#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x3_t __s1 = __p1; \
+ uint16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
+})
+#else
+#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x3_t __s1 = __p1; \
+ float32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
+})
+#else
+#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x3_t __s1 = __p1; \
+ float16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
+})
+#else
+#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x3_t __s1 = __p1; \
+ int32x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
+})
+#else
+#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x3_t __s1 = __p1; \
+ int16x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
+})
+#else
+#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x3_t __s1 = __p1; \
+ uint8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
+})
+#else
+#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x3_t __s1 = __p1; \
+ uint32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
+})
+#else
+#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x3_t __s1 = __p1; \
+ uint16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
+})
+#else
+#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x3_t __s1 = __p1; \
+ int8x8x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
+})
+#else
+#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x3_t __s1 = __p1; \
+ float32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
+})
+#else
+#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x3_t __s1 = __p1; \
+ float16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
+})
+#else
+#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x3_t __s1 = __p1; \
+ int32x2x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
+})
+#else
+#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x3_t __s1 = __p1; \
+ int16x4x3_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
+})
+#endif
+
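+/* vst4_<type> / vst4q_<type>: store the four vectors in __p1 to __p0 with
+ * 4-way interleaving, using the same big-endian lane reversal as vst3. */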
+#ifdef __LITTLE_ENDIAN__
+#define vst4_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
+})
+#else
+#define vst4_p8(__p0, __p1) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ poly8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
+})
+#else
+#define vst4_p16(__p0, __p1) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ poly16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
+})
+#else
+#define vst4q_p8(__p0, __p1) __extension__ ({ \
+ poly8x16x4_t __s1 = __p1; \
+ poly8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
+})
+#else
+#define vst4q_p16(__p0, __p1) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ poly16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
+})
+#else
+#define vst4q_u8(__p0, __p1) __extension__ ({ \
+ uint8x16x4_t __s1 = __p1; \
+ uint8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
+})
+#else
+#define vst4q_u32(__p0, __p1) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ uint32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
+})
+#else
+#define vst4q_u16(__p0, __p1) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ uint16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
+})
+#else
+#define vst4q_s8(__p0, __p1) __extension__ ({ \
+ int8x16x4_t __s1 = __p1; \
+ int8x16x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
+})
+#else
+#define vst4q_f32(__p0, __p1) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ float32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
+})
+#else
+#define vst4q_f16(__p0, __p1) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ float16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
+})
+#else
+#define vst4q_s32(__p0, __p1) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ int32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
+})
+#else
+#define vst4q_s16(__p0, __p1) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ int16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
+})
+#else
+#define vst4_u8(__p0, __p1) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ uint8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
+})
+#else
+#define vst4_u32(__p0, __p1) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ uint32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
+})
+#else
+#define vst4_u64(__p0, __p1) __extension__ ({ \
+ uint64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
+})
+#else
+#define vst4_u16(__p0, __p1) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ uint16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s8(__p0, __p1) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
+})
+#else
+#define vst4_s8(__p0, __p1) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ int8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_f32(__p0, __p1) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
+})
+#else
+#define vst4_f32(__p0, __p1) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ float32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_f16(__p0, __p1) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
+})
+#else
+#define vst4_f16(__p0, __p1) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ float16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s32(__p0, __p1) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
+})
+#else
+#define vst4_s32(__p0, __p1) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ int32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s64(__p0, __p1) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+})
+#else
+#define vst4_s64(__p0, __p1) __extension__ ({ \
+ int64x1x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_s16(__p0, __p1) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
+})
+#else
+#define vst4_s16(__p0, __p1) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ int16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
+})
+#endif
+
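+/* vst4_lane_<type> / vst4q_lane_<type>: store lane __p2 of each of the
+ * four vectors in __p1 to __p0, reversing lanes first on big-endian. */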
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
+})
+#else
+#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
+ poly8x8x4_t __s1 = __p1; \
+ poly8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
+})
+#else
+#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x4x4_t __s1 = __p1; \
+ poly16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
+})
+#else
+#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
+ poly16x8x4_t __s1 = __p1; \
+ poly16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
+})
+#else
+#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x4x4_t __s1 = __p1; \
+ uint32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
+})
+#else
+#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x8x4_t __s1 = __p1; \
+ uint16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
+})
+#else
+#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x4x4_t __s1 = __p1; \
+ float32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
+})
+#else
+#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x8x4_t __s1 = __p1; \
+ float16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
+})
+#else
+#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x4x4_t __s1 = __p1; \
+ int32x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
+})
+#else
+#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x8x4_t __s1 = __p1; \
+ int16x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
+})
+#else
+#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
+ uint8x8x4_t __s1 = __p1; \
+ uint8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
+})
+#else
+#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
+ uint32x2x4_t __s1 = __p1; \
+ uint32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
+})
+#else
+#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
+ uint16x4x4_t __s1 = __p1; \
+ uint16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
+})
+#else
+#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
+ int8x8x4_t __s1 = __p1; \
+ int8x8x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
+})
+#else
+#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
+ float32x2x4_t __s1 = __p1; \
+ float32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
+})
+#else
+#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
+ float16x4x4_t __s1 = __p1; \
+ float16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
+})
+#else
+#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
+ int32x2x4_t __s1 = __p1; \
+ int32x2x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
+})
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
+})
+#else
+#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
+ int16x4x4_t __s1 = __p1; \
+ int16x4x4_t __rev1; \
+ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
+ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
+ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
+ __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
+})
+#endif
+
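+/* vsub_<type> / vsubq_<type>: element-wise subtraction. Big-endian
+ * variants reverse both operands, subtract, then reverse the result back
+ * into memory lane order. */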
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
+ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x16_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
+ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x16_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
+ float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ float32x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
+ uint64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
+ float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ float32x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
+ int64x1_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __ret;
+ __ret = __p0 - __p1;
+ return __ret;
+}
+#else
+__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = __rev0 - __rev1;
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
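+/* vsubhn_<type>: subtract and narrow, returning the high half of each
+ * element of the difference. The __noswap_* helpers perform the same
+ * operation without any lane reordering, for use inside other big-endian
+ * implementations that operate on already-reversed vectors. */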
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#else
+__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
+ uint16x4_t __ret;
+ __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#else
+__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
+ uint32x2_t __ret;
+ __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#else
+__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
+ int16x4_t __ret;
+ __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#else
+__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
+ int32x2_t __ret;
+ __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
+ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
+ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
+ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
+ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __ret;
+ __ret = __p0 - vmovl_u8(__p1);
+ return __ret;
+}
+#else
+__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint16x8_t __ret;
+ __ret = __rev0 - __noswap_vmovl_u8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __ret;
+ __ret = __p0 - vmovl_u32(__p1);
+ return __ret;
+}
+#else
+__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
+ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ uint64x2_t __ret;
+ __ret = __rev0 - __noswap_vmovl_u32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __ret;
+ __ret = __p0 - vmovl_u16(__p1);
+ return __ret;
+}
+#else
+__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
+ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ uint32x4_t __ret;
+ __ret = __rev0 - __noswap_vmovl_u16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+ int16x8_t __ret;
+ __ret = __p0 - vmovl_s8(__p1);
+ return __ret;
+}
+#else
+__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
+ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int16x8_t __ret;
+ __ret = __rev0 - __noswap_vmovl_s8(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+ int64x2_t __ret;
+ __ret = __p0 - vmovl_s32(__p1);
+ return __ret;
+}
+#else
+__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
+ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
+ int64x2_t __ret;
+ __ret = __rev0 - __noswap_vmovl_s32(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
+ int32x4_t __ret;
+ __ret = __p0 - vmovl_s16(__p1);
+ return __ret;
+}
+#else
+__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
+ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
+ int32x4_t __ret;
+ __ret = __rev0 - __noswap_vmovl_s16(__rev1);
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
+ poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
+ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
+ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
+__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
+ poly8x8x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
+ return __ret;
+}
+#else
+__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
+ uint8x8x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ uint8x8_t __ret;
+ __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
+ return __ret;
+}
+#else
+__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
+ int8x8x2_t __rev0;
+ __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
+ __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
+ int8x8_t __ret;
+ __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
+ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
+ return __ret;
+}
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
+ poly8x8_t __ret;
+ __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
+ return __ret;
+}
+#else
[... 40969 lines stripped ...]
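
Every intrinsic shown above follows the same shape: the __LITTLE_ENDIAN__ branch calls the underlying builtin (or plain vector arithmetic) directly, while the big-endian branch reverses the vector lanes with __builtin_shufflevector on the way in and again on the way out, so the result a caller observes does not depend on byte order. The __noswap_ variants exist so that other big-endian wrappers, which have already reversed their operands, can compose these operations without reversing lanes twice (see the big-endian vsubl_u8, which calls __noswap_vmovl_u8 on its reversed inputs).

For readers skimming the diff, a short usage sketch follows. It is illustrative only and not part of the committed header; it assumes a Clang or GCC build targeting ARM or AArch64 with NEON enabled (the same guard the header enforces) and exercises three intrinsics defined in this hunk: vsub_s16 (lane-wise subtract), vsubhn_u32 (subtract, then keep the high half of each wide difference), and vtbl1_u8 (byte table lookup), plus the standard vget_lane helpers.

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  /* vsub_s16: lane-wise subtraction of four 16-bit lanes. */
  int16x4_t a = {10, 20, 30, 40};
  int16x4_t b = { 1,  2,  3,  4};
  int16x4_t diff = vsub_s16(a, b);              /* {9, 18, 27, 36} */

  /* vsubhn_u32: subtract, then narrow by keeping the high 16 bits
   * of each 32-bit difference. */
  uint32x4_t x = {0x00030000u, 0x00070000u, 0x000B0000u, 0x000F0000u};
  uint32x4_t y = {0x00010000u, 0x00020000u, 0x00030000u, 0x00040000u};
  uint16x4_t hi = vsubhn_u32(x, y);             /* {2, 5, 8, 11} */

  /* vtbl1_u8: result[i] = table[idx[i]] for idx[i] < 8, else 0. */
  uint8x8_t table = {10, 11, 12, 13, 14, 15, 16, 17};
  uint8x8_t idx   = { 7,  6,  5,  4,  3,  2,  1,  0};
  uint8x8_t perm  = vtbl1_u8(table, idx);       /* table reversed */

  printf("%d %u %u\n",
         (int)vget_lane_s16(diff, 0),
         (unsigned)vget_lane_u16(hi, 0),
         (unsigned)vget_lane_u8(perm, 0));      /* prints: 9 2 17 */
  return 0;
}

The brace initializers rely on the GNU vector-extension behavior that Clang and GCC give these neon_vector_type types; production code would more commonly populate the vectors with vld1_* loads from memory.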