r262964 - AArch64: remove tests of intrinsics completely duplicated elsewhere.
Tim Northover via cfe-commits
cfe-commits at lists.llvm.org
Tue Mar 8 15:10:58 PST 2016
Author: tnorthover
Date: Tue Mar 8 17:10:58 2016
New Revision: 262964
URL: http://llvm.org/viewvc/llvm-project?rev=262964&view=rev
Log:
AArch64: remove tests of intrinsics completely duplicated elsewhere.
Removed:
cfe/trunk/test/CodeGen/arm64_neon_high_half.c
cfe/trunk/test/CodeGen/arm64_vCMP.c
cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c
cfe/trunk/test/CodeGen/arm64_vMaxMin.c
cfe/trunk/test/CodeGen/arm64_vadd.c
cfe/trunk/test/CodeGen/arm64_vca.c
cfe/trunk/test/CodeGen/arm64_vcvtfp.c
cfe/trunk/test/CodeGen/arm64_vecCmpBr.c
cfe/trunk/test/CodeGen/arm64_vext.c
cfe/trunk/test/CodeGen/arm64_vfma.c
cfe/trunk/test/CodeGen/arm64_vneg.c
cfe/trunk/test/CodeGen/arm64_vqmov.c
cfe/trunk/test/CodeGen/arm64_vrecps.c
cfe/trunk/test/CodeGen/arm64_vshift.c
cfe/trunk/test/CodeGen/arm64_vsli.c
cfe/trunk/test/CodeGen/arm64_vsri.c
cfe/trunk/test/CodeGen/arm64_vtst.c
Removed: cfe/trunk/test/CodeGen/arm64_neon_high_half.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_neon_high_half.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_neon_high_half.c (original)
+++ cfe/trunk/test/CodeGen/arm64_neon_high_half.c (removed)
@@ -1,559 +0,0 @@
-// RUN: %clang_cc1 -triple arm64-apple-ios7.0 -target-feature +neon -ffreestanding -Os -S -o - %s | FileCheck %s
-// REQUIRES: aarch64-registered-target
-
-#include <arm_neon.h>
-
-int16x8_t test_vaddw_high_s8(int16x8_t lhs, int8x16_t rhs) {
- // CHECK: saddw2.8h
- return vaddw_high_s8(lhs, rhs);
-}
-
-int32x4_t test_vaddw_high_s16(int32x4_t lhs, int16x8_t rhs) {
- // CHECK: saddw2.4s
- return vaddw_high_s16(lhs, rhs);
-}
-
-int64x2_t test_vaddw_high_s32(int64x2_t lhs, int32x4_t rhs) {
- // CHECK: saddw2.2d
- return vaddw_high_s32(lhs, rhs);
-}
-
-uint16x8_t test_vaddw_high_u8(uint16x8_t lhs, uint8x16_t rhs) {
- // CHECK: uaddw2.8h
- return vaddw_high_u8(lhs, rhs);
-}
-
-uint32x4_t test_vaddw_high_u16(uint32x4_t lhs, uint16x8_t rhs) {
- // CHECK: uaddw2.4s
- return vaddw_high_u16(lhs, rhs);
-}
-
-uint64x2_t test_vaddw_high_u32(uint64x2_t lhs, uint32x4_t rhs) {
- // CHECK: uaddw2.2d
- return vaddw_high_u32(lhs, rhs);
-}
-
-int16x8_t test_vsubw_high_s8(int16x8_t lhs, int8x16_t rhs) {
- // CHECK: ssubw2.8h
- return vsubw_high_s8(lhs, rhs);
-}
-
-int32x4_t test_vsubw_high_s16(int32x4_t lhs, int16x8_t rhs) {
- // CHECK: ssubw2.4s
- return vsubw_high_s16(lhs, rhs);
-}
-
-int64x2_t test_vsubw_high_s32(int64x2_t lhs, int32x4_t rhs) {
- // CHECK: ssubw2.2d
- return vsubw_high_s32(lhs, rhs);
-}
-
-uint16x8_t test_vsubw_high_u8(uint16x8_t lhs, uint8x16_t rhs) {
- // CHECK: usubw2.8h
- return vsubw_high_u8(lhs, rhs);
-}
-
-uint32x4_t test_vsubw_high_u16(uint32x4_t lhs, uint16x8_t rhs) {
- // CHECK: usubw2.4s
- return vsubw_high_u16(lhs, rhs);
-}
-
-uint64x2_t test_vsubw_high_u32(uint64x2_t lhs, uint32x4_t rhs) {
- // CHECK: usubw2.2d
- return vsubw_high_u32(lhs, rhs);
-}
-
-int16x8_t test_vabdl_high_s8(int8x16_t lhs, int8x16_t rhs) {
- // CHECK: sabdl2.8h
- return vabdl_high_s8(lhs, rhs);
-}
-
-int32x4_t test_vabdl_high_s16(int16x8_t lhs, int16x8_t rhs) {
- // CHECK: sabdl2.4s
- return vabdl_high_s16(lhs, rhs);
-}
-
-int64x2_t test_vabdl_high_s32(int32x4_t lhs, int32x4_t rhs) {
- // CHECK: sabdl2.2d
- return vabdl_high_s32(lhs, rhs);
-}
-
-uint16x8_t test_vabdl_high_u8(uint8x16_t lhs, uint8x16_t rhs) {
- // CHECK: uabdl2.8h
- return vabdl_high_u8(lhs, rhs);
-}
-
-uint32x4_t test_vabdl_high_u16(uint16x8_t lhs, uint16x8_t rhs) {
- // CHECK: uabdl2.4s
- return vabdl_high_u16(lhs, rhs);
-}
-
-uint64x2_t test_vabdl_high_u32(uint32x4_t lhs, uint32x4_t rhs) {
- // CHECK: uabdl2.2d
- return vabdl_high_u32(lhs, rhs);
-}
-
-int16x8_t test_vabal_high_s8(int16x8_t accum, int8x16_t lhs, int8x16_t rhs) {
- // CHECK: sabal2.8h
- return vabal_high_s8(accum, lhs, rhs);
-}
-
-int32x4_t test_vabal_high_s16(int32x4_t accum, int16x8_t lhs, int16x8_t rhs) {
- // CHECK: sabal2.4s
- return vabal_high_s16(accum, lhs, rhs);
-}
-
-int64x2_t test_vabal_high_s32(int64x2_t accum, int32x4_t lhs, int32x4_t rhs) {
- // CHECK: sabal2.2d
- return vabal_high_s32(accum, lhs, rhs);
-}
-
-uint16x8_t test_vabal_high_u8(uint16x8_t accum, uint8x16_t lhs, uint8x16_t rhs) {
- // CHECK: uabal2.8h
- return vabal_high_u8(accum, lhs, rhs);
-}
-
-uint32x4_t test_vabal_high_u16(uint32x4_t accum, uint16x8_t lhs, uint16x8_t rhs) {
- // CHECK: uabal2.4s
- return vabal_high_u16(accum, lhs, rhs);
-}
-
-uint64x2_t test_vabal_high_u32(uint64x2_t accum, uint32x4_t lhs, uint32x4_t rhs) {
- // CHECK: uabal2.2d
- return vabal_high_u32(accum, lhs, rhs);
-}
-
-int32x4_t test_vqdmlal_high_s16(int32x4_t accum, int16x8_t lhs, int16x8_t rhs) {
- // CHECK: sqdmlal2.4s
- return vqdmlal_high_s16(accum, lhs, rhs);
-}
-
-int64x2_t test_vqdmlal_high_s32(int64x2_t accum, int32x4_t lhs, int32x4_t rhs) {
- // CHECK: sqdmlal2.2d
- return vqdmlal_high_s32(accum, lhs, rhs);
-}
-
-int32x4_t test_vqdmlsl_high_s16(int32x4_t accum, int16x8_t lhs, int16x8_t rhs) {
- // CHECK: sqdmlsl2.4s
- return vqdmlsl_high_s16(accum, lhs, rhs);
-}
-
-int64x2_t test_vqdmlsl_high_s32(int64x2_t accum, int32x4_t lhs, int32x4_t rhs) {
- // CHECK: sqdmlsl2.2d
- return vqdmlsl_high_s32(accum, lhs, rhs);
-}
-
-int32x4_t test_vqdmull_high_s16(int16x8_t lhs, int16x8_t rhs) {
- // CHECK: sqdmull2.4s
- return vqdmull_high_s16(lhs, rhs);
-}
-
-int64x2_t test_vqdmull_high_s32(int32x4_t lhs, int32x4_t rhs) {
- // CHECK: sqdmull2.2d
- return vqdmull_high_s32(lhs, rhs);
-}
-
-int16x8_t test_vshll_high_n_s8(int8x16_t in) {
- // CHECK: sshll2.8h
- return vshll_high_n_s8(in, 7);
-}
-
-int32x4_t test_vshll_high_n_s16(int16x8_t in) {
- // CHECK: sshll2.4s
- return vshll_high_n_s16(in, 15);
-}
-
-int64x2_t test_vshll_high_n_s32(int32x4_t in) {
- // CHECK: sshll2.2d
- return vshll_high_n_s32(in, 31);
-}
-
-int16x8_t test_vshll_high_n_u8(int8x16_t in) {
- // CHECK: ushll2.8h
- return vshll_high_n_u8(in, 7);
-}
-
-int32x4_t test_vshll_high_n_u16(int16x8_t in) {
- // CHECK: ushll2.4s
- return vshll_high_n_u16(in, 15);
-}
-
-int64x2_t test_vshll_high_n_u32(int32x4_t in) {
- // CHECK: ushll2.2d
- return vshll_high_n_u32(in, 31);
-}
-
-int16x8_t test_vshll_high_n_s8_max(int8x16_t in) {
- // CHECK: shll2.8h
- return vshll_high_n_s8(in, 8);
-}
-
-int32x4_t test_vshll_high_n_s16_max(int16x8_t in) {
- // CHECK: shll2.4s
- return vshll_high_n_s16(in, 16);
-}
-
-int64x2_t test_vshll_high_n_s32_max(int32x4_t in) {
- // CHECK: shll2.2d
- return vshll_high_n_s32(in, 32);
-}
-
-int16x8_t test_vshll_high_n_u8_max(int8x16_t in) {
- // CHECK: shll2.8h
- return vshll_high_n_u8(in, 8);
-}
-
-int32x4_t test_vshll_high_n_u16_max(int16x8_t in) {
- // CHECK: shll2.4s
- return vshll_high_n_u16(in, 16);
-}
-
-int64x2_t test_vshll_high_n_u32_max(int32x4_t in) {
- // CHECK: shll2.2d
- return vshll_high_n_u32(in, 32);
-}
-
-int16x8_t test_vsubl_high_s8(int8x16_t lhs, int8x16_t rhs) {
- // CHECK: ssubl2.8h
- return vsubl_high_s8(lhs, rhs);
-}
-
-int32x4_t test_vsubl_high_s16(int16x8_t lhs, int16x8_t rhs) {
- // CHECK: ssubl2.4s
- return vsubl_high_s16(lhs, rhs);
-}
-
-int64x2_t test_vsubl_high_s32(int32x4_t lhs, int32x4_t rhs) {
- // CHECK: ssubl2.2d
- return vsubl_high_s32(lhs, rhs);
-}
-
-uint16x8_t test_vsubl_high_u8(uint8x16_t lhs, uint8x16_t rhs) {
- // CHECK: usubl2.8h
- return vsubl_high_u8(lhs, rhs);
-}
-
-uint32x4_t test_vsubl_high_u16(uint16x8_t lhs, uint16x8_t rhs) {
- // CHECK: usubl2.4s
- return vsubl_high_u16(lhs, rhs);
-}
-
-uint64x2_t test_vsubl_high_u32(uint32x4_t lhs, uint32x4_t rhs) {
- // CHECK: usubl2.2d
- return vsubl_high_u32(lhs, rhs);
-}
-
-int8x16_t test_vrshrn_high_n_s16(int8x8_t lowpart, int16x8_t input) {
- // CHECK: rshrn2.16b
- return vrshrn_high_n_s16(lowpart, input, 2);
-}
-
-int16x8_t test_vrshrn_high_n_s32(int16x4_t lowpart, int32x4_t input) {
- // CHECK: rshrn2.8h
- return vrshrn_high_n_s32(lowpart, input, 2);
-}
-
-int32x4_t test_vrshrn_high_n_s64(int32x2_t lowpart, int64x2_t input) {
- // CHECK: rshrn2.4s
- return vrshrn_high_n_s64(lowpart, input, 2);
-}
-
-uint8x16_t test_vrshrn_high_n_u16(uint8x8_t lowpart, uint16x8_t input) {
- // CHECK: rshrn2.16b
- return vrshrn_high_n_u16(lowpart, input, 2);
-}
-
-uint16x8_t test_vrshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) {
- // CHECK: rshrn2.8h
- return vrshrn_high_n_u32(lowpart, input, 2);
-}
-
-uint32x4_t test_vrshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {
- // CHECK: rshrn2.4s
- return vrshrn_high_n_u64(lowpart, input, 2);
-}
-
-int8x16_t test_vshrn_high_n_s16(int8x8_t lowpart, int16x8_t input) {
- // CHECK: shrn2.16b
- return vshrn_high_n_s16(lowpart, input, 2);
-}
-
-int16x8_t test_vshrn_high_n_s32(int16x4_t lowpart, int32x4_t input) {
- // CHECK: shrn2.8h
- return vshrn_high_n_s32(lowpart, input, 2);
-}
-
-int32x4_t test_vshrn_high_n_s64(int32x2_t lowpart, int64x2_t input) {
- // CHECK: shrn2.4s
- return vshrn_high_n_s64(lowpart, input, 2);
-}
-
-uint8x16_t test_vshrn_high_n_u16(uint8x8_t lowpart, uint16x8_t input) {
- // CHECK: shrn2.16b
- return vshrn_high_n_u16(lowpart, input, 2);
-}
-
-uint16x8_t test_vshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) {
- // CHECK: shrn2.8h
- return vshrn_high_n_u32(lowpart, input, 2);
-}
-
-uint32x4_t test_vshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {
- // CHECK: shrn2.4s
- return vshrn_high_n_u64(lowpart, input, 2);
-}
-
-uint8x16_t test_vqshrun_high_n_s16(uint8x8_t lowpart, int16x8_t input) {
- // CHECK: sqshrun2.16b
- return vqshrun_high_n_s16(lowpart, input, 2);
-}
-
-uint16x8_t test_vqshrun_high_n_s32(uint16x4_t lowpart, int32x4_t input) {
- // CHECK: sqshrun2.8h
- return vqshrun_high_n_s32(lowpart, input, 2);
-}
-
-uint32x4_t test_vqshrun_high_n_s64(uint32x2_t lowpart, int64x2_t input) {
- // CHECK: sqshrun2.4s
- return vqshrun_high_n_s64(lowpart, input, 2);
-}
-
-uint8x16_t test_vqrshrun_high_n_s16(uint8x8_t lowpart, int16x8_t input) {
- // CHECK: sqrshrun2.16b
- return vqrshrun_high_n_s16(lowpart, input, 2);
-}
-
-uint16x8_t test_vqrshrun_high_n_s32(uint16x4_t lowpart, int32x4_t input) {
- // CHECK: sqrshrun2.8h
- return vqrshrun_high_n_s32(lowpart, input, 2);
-}
-
-uint32x4_t test_vqrshrun_high_n_s64(uint32x2_t lowpart, int64x2_t input) {
- // CHECK: sqrshrun2.4s
- return vqrshrun_high_n_s64(lowpart, input, 2);
-}
-
-int8x16_t test_vqshrn_high_n_s16(int8x8_t lowpart, int16x8_t input) {
- // CHECK: sqshrn2.16b
- return vqshrn_high_n_s16(lowpart, input, 2);
-}
-
-int16x8_t test_vqshrn_high_n_s32(int16x4_t lowpart, int32x4_t input) {
- // CHECK: sqshrn2.8h
- return vqshrn_high_n_s32(lowpart, input, 2);
-}
-
-int32x4_t test_vqshrn_high_n_s64(int32x2_t lowpart, int64x2_t input) {
- // CHECK: sqshrn2.4s
- return vqshrn_high_n_s64(lowpart, input, 2);
-}
-
-uint8x16_t test_vqshrn_high_n_u16(uint8x8_t lowpart, uint16x8_t input) {
- // CHECK: uqshrn2.16b
- return vqshrn_high_n_u16(lowpart, input, 2);
-}
-
-uint16x8_t test_vqshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) {
- // CHECK: uqshrn2.8h
- return vqshrn_high_n_u32(lowpart, input, 2);
-}
-
-uint32x4_t test_vqshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {
- // CHECK: uqshrn2.4s
- return vqshrn_high_n_u64(lowpart, input, 2);
-}
-
-int8x16_t test_vqrshrn_high_n_s16(int8x8_t lowpart, int16x8_t input) {
- // CHECK: sqrshrn2.16b
- return vqrshrn_high_n_s16(lowpart, input, 2);
-}
-
-int16x8_t test_vqrshrn_high_n_s32(int16x4_t lowpart, int32x4_t input) {
- // CHECK: sqrshrn2.8h
- return vqrshrn_high_n_s32(lowpart, input, 2);
-}
-
-int32x4_t test_vqrshrn_high_n_s64(int32x2_t lowpart, int64x2_t input) {
- // CHECK: sqrshrn2.4s
- return vqrshrn_high_n_s64(lowpart, input, 2);
-}
-
-uint8x16_t test_vqrshrn_high_n_u16(uint8x8_t lowpart, uint16x8_t input) {
- // CHECK: uqrshrn2.16b
- return vqrshrn_high_n_u16(lowpart, input, 2);
-}
-
-uint16x8_t test_vqrshrn_high_n_u32(uint16x4_t lowpart, uint32x4_t input) {
- // CHECK: uqrshrn2.8h
- return vqrshrn_high_n_u32(lowpart, input, 2);
-}
-
-uint32x4_t test_vqrshrn_high_n_u64(uint32x2_t lowpart, uint64x2_t input) {
- // CHECK: uqrshrn2.4s
- return vqrshrn_high_n_u64(lowpart, input, 2);
-}
-
-int8x16_t test_vaddhn_high_s16(int8x8_t lowpart, int16x8_t lhs, int16x8_t rhs) {
- // CHECK: addhn2.16b v0, {{v1, v2|v2, v1}}
- return vaddhn_high_s16(lowpart, lhs, rhs);
-}
-
-int16x8_t test_vaddhn_high_s32(int16x4_t lowpart, int32x4_t lhs, int32x4_t rhs) {
- // CHECK: addhn2.8h v0, {{v1, v2|v2, v1}}
- return vaddhn_high_s32(lowpart, lhs, rhs);
-}
-
-int32x4_t test_vaddhn_high_s64(int32x2_t lowpart, int64x2_t lhs, int64x2_t rhs) {
- // CHECK: addhn2.4s v0, {{v1, v2|v2, v1}}
- return vaddhn_high_s64(lowpart, lhs, rhs);
-}
-
-uint8x16_t test_vaddhn_high_u16(uint8x8_t lowpart, uint16x8_t lhs, uint16x8_t rhs) {
- // CHECK: addhn2.16b v0, {{v1, v2|v2, v1}}
- return vaddhn_high_u16(lowpart, lhs, rhs);
-}
-
-uint16x8_t test_vaddhn_high_u32(uint16x4_t lowpart, uint32x4_t lhs, uint32x4_t rhs) {
- // CHECK: addhn2.8h v0, {{v1, v2|v2, v1}}
- return vaddhn_high_u32(lowpart, lhs, rhs);
-}
-
-uint32x4_t test_vaddhn_high_u64(uint32x2_t lowpart, uint64x2_t lhs, uint64x2_t rhs) {
- // CHECK: addhn2.4s v0, {{v1, v2|v2, v1}}
- return vaddhn_high_u64(lowpart, lhs, rhs);
-}
-
-int8x16_t test_vraddhn_high_s16(int8x8_t lowpart, int16x8_t lhs, int16x8_t rhs) {
- // CHECK: raddhn2.16b v0, v1, v2
- return vraddhn_high_s16(lowpart, lhs, rhs);
-}
-
-int16x8_t test_vraddhn_high_s32(int16x4_t lowpart, int32x4_t lhs, int32x4_t rhs) {
- // CHECK: raddhn2.8h v0, v1, v2
- return vraddhn_high_s32(lowpart, lhs, rhs);
-}
-
-int32x4_t test_vraddhn_high_s64(int32x2_t lowpart, int64x2_t lhs, int64x2_t rhs) {
- // CHECK: raddhn2.4s v0, v1, v2
- return vraddhn_high_s64(lowpart, lhs, rhs);
-}
-
-uint8x16_t test_vraddhn_high_u16(uint8x8_t lowpart, uint16x8_t lhs, uint16x8_t rhs) {
- // CHECK: raddhn2.16b v0, v1, v2
- return vraddhn_high_u16(lowpart, lhs, rhs);
-}
-
-uint16x8_t test_vraddhn_high_u32(uint16x4_t lowpart, uint32x4_t lhs, uint32x4_t rhs) {
- // CHECK: raddhn2.8h v0, v1, v2
- return vraddhn_high_u32(lowpart, lhs, rhs);
-}
-
-uint32x4_t test_vraddhn_high_u64(uint32x2_t lowpart, uint64x2_t lhs, uint64x2_t rhs) {
- // CHECK: raddhn2.4s v0, v1, v2
- return vraddhn_high_u64(lowpart, lhs, rhs);
-}
-
-int8x16_t test_vmovn_high_s16(int8x8_t lowpart, int16x8_t wide) {
- // CHECK: xtn2.16b v0, v1
- return vmovn_high_s16(lowpart, wide);
-}
-
-int16x8_t test_vmovn_high_s32(int16x4_t lowpart, int32x4_t wide) {
- // CHECK: xtn2.8h v0, v1
- return vmovn_high_s32(lowpart, wide);
-}
-
-int32x4_t test_vmovn_high_s64(int32x2_t lowpart, int64x2_t wide) {
- // CHECK: xtn2.4s v0, v1
- return vmovn_high_s64(lowpart, wide);
-}
-
-uint8x16_t test_vmovn_high_u16(uint8x8_t lowpart, uint16x8_t wide) {
- // CHECK: xtn2.16b v0, v1
- return vmovn_high_u16(lowpart, wide);
-}
-
-uint16x8_t test_vmovn_high_u32(uint16x4_t lowpart, uint32x4_t wide) {
- // CHECK: xtn2.8h v0, v1
- return vmovn_high_u32(lowpart, wide);
-}
-
-uint32x4_t test_vmovn_high_u64(uint32x2_t lowpart, uint64x2_t wide) {
- // CHECK: xtn2.4s v0, v1
- return vmovn_high_u64(lowpart, wide);
-}
-
-int8x16_t test_vqmovn_high_s16(int8x8_t lowpart, int16x8_t wide) {
- // CHECK: sqxtn2.16b v0, v1
- return vqmovn_high_s16(lowpart, wide);
-}
-
-int16x8_t test_vqmovn_high_s32(int16x4_t lowpart, int32x4_t wide) {
- // CHECK: sqxtn2.8h v0, v1
- return vqmovn_high_s32(lowpart, wide);
-}
-
-int32x4_t test_vqmovn_high_s64(int32x2_t lowpart, int64x2_t wide) {
- // CHECK: sqxtn2.4s v0, v1
- return vqmovn_high_s64(lowpart, wide);
-}
-
-uint8x16_t test_vqmovn_high_u16(uint8x8_t lowpart, int16x8_t wide) {
- // CHECK: uqxtn2.16b v0, v1
- return vqmovn_high_u16(lowpart, wide);
-}
-
-uint16x8_t test_vqmovn_high_u32(uint16x4_t lowpart, int32x4_t wide) {
- // CHECK: uqxtn2.8h v0, v1
- return vqmovn_high_u32(lowpart, wide);
-}
-
-uint32x4_t test_vqmovn_high_u64(uint32x2_t lowpart, int64x2_t wide) {
- // CHECK: uqxtn2.4s v0, v1
- return vqmovn_high_u64(lowpart, wide);
-}
-
-uint8x16_t test_vqmovun_high_s16(uint8x8_t lowpart, int16x8_t wide) {
- // CHECK: sqxtun2.16b v0, v1
- return vqmovun_high_s16(lowpart, wide);
-}
-
-uint16x8_t test_vqmovun_high_s32(uint16x4_t lowpart, int32x4_t wide) {
- // CHECK: sqxtun2.8h v0, v1
- return vqmovun_high_s32(lowpart, wide);
-}
-
-uint32x4_t test_vqmovun_high_s64(uint32x2_t lowpart, int64x2_t wide) {
- // CHECK: sqxtun2.4s v0, v1
- return vqmovun_high_s64(lowpart, wide);
-}
-
-float32x4_t test_vcvtx_high_f32_f64(float32x2_t lowpart, float64x2_t wide) {
- // CHECK: fcvtxn2 v0.4s, v1.2d
- return vcvtx_high_f32_f64(lowpart, wide);
-}
-
-float64x2_t test_vcvt_f64_f32(float32x2_t x) {
- // CHECK: fcvtl v0.2d, v0.2s
- return vcvt_f64_f32(x);
-}
-
-float64x2_t test_vcvt_high_f64_f32(float32x4_t x) {
- // CHECK: fcvtl2 v0.2d, v0.4s
- return vcvt_high_f64_f32(x);
-}
-
-float32x2_t test_vcvt_f32_f64(float64x2_t v) {
- // CHECK: fcvtn v0.2s, v0.2d
- return vcvt_f32_f64(v);
-}
-
-float32x4_t test_vcvt_high_f32_f64(float32x2_t x, float64x2_t v) {
- // CHECK: fcvtn2 v0.4s, v1.2d
- return vcvt_high_f32_f64(x, v);
-}
-
-float32x2_t test_vcvtx_f32_f64(float64x2_t v) {
- // CHECK: fcvtxn v0.2s, v0.2d
- return vcvtx_f32_f64(v);
-}
Removed: cfe/trunk/test/CodeGen/arm64_vCMP.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vCMP.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vCMP.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vCMP.c (removed)
@@ -1,108 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-
-// Test ARM64 SIMD compare and absolute value intrinsics
-
-#include <arm_neon.h>
-
-int64x2_t test_vabsq_s64(int64x2_t a1) {
- // CHECK: test_vabsq_s64
- return vabsq_s64(a1);
- // CHECK: llvm.aarch64.neon.abs.v2i64
- // CHECK-NEXT: ret
-}
-
-int64_t test_vceqd_s64(int64_t a1, int64_t a2) {
- // CHECK: test_vceqd_s64
- return vceqd_s64(a1, a2);
- // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp eq i64 %a1, %a2
- // CHECK: sext i1 [[BIT]] to i64
-}
-
-int64_t test_vceqd_f64(float64_t a1, float64_t a2) {
- // CHECK: test_vceqd_f64
- return vceqd_f64(a1, a2);
- // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = fcmp oeq double %a1, %a2
- // CHECK: sext i1 [[BIT]] to i64
-}
-
-uint64_t test_vcgtd_u64(uint64_t a1, uint64_t a2) {
- // CHECK: test_vcgtd_u64
- return vcgtd_u64(a1, a2);
- // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp ugt i64 %a1, %a2
- // CHECK: sext i1 [[BIT]] to i64
-}
-
-uint64_t test_vcled_u64(uint64_t a1, uint64_t a2) {
- // CHECK: test_vcled_u64
- return vcled_u64(a1, a2);
- // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp ule i64 %a1, %a2
- // CHECK: sext i1 [[BIT]] to i64
-}
-
-int64_t test_vceqzd_s64(int64_t a1) {
- // CHECK: test_vceqzd_s64
- return vceqzd_s64(a1);
- // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp eq i64 %a1, 0
- // CHECK: sext i1 [[BIT]] to i64
-}
-
-uint64x2_t test_vceqq_u64(uint64x2_t a1, uint64x2_t a2) {
- // CHECK: test_vceqq_u64
- return vceqq_u64(a1, a2);
- // CHECK: icmp eq <2 x i64> %a1, %a2
-}
-
-uint64x2_t test_vcgeq_s64(int64x2_t a1, int64x2_t a2) {
- // CHECK: test_vcgeq_s64
- return vcgeq_s64(a1, a2);
- // CHECK: icmp sge <2 x i64> %a1, %a2
-}
-
-uint64x2_t test_vcgeq_u64(uint64x2_t a1, uint64x2_t a2) {
- // CHECK: test_vcgeq_u64
- return vcgeq_u64(a1, a2);
- // CHECK: icmp uge <2 x i64> %a1, %a2
-}
-
-uint64x2_t test_vcgtq_s64(int64x2_t a1, int64x2_t a2) {
- // CHECK: test_vcgtq_s64
- return vcgtq_s64(a1, a2);
- // CHECK: icmp sgt <2 x i64> %a1, %a2
-}
-
-uint64x2_t test_vcgtq_u64(uint64x2_t a1, uint64x2_t a2) {
- // CHECK: test_vcgtq_u64
- return vcgtq_u64(a1, a2);
- // CHECK: icmp ugt <2 x i64> %a1, %a2
-}
-
-uint64x2_t test_vcleq_s64(int64x2_t a1, int64x2_t a2) {
- // CHECK: test_vcleq_s64
- return vcleq_s64(a1, a2);
- // CHECK: icmp sle <2 x i64> %a1, %a2
-}
-
-uint64x2_t test_vcleq_u64(uint64x2_t a1, uint64x2_t a2) {
- // CHECK: test_vcleq_u64
- return vcleq_u64(a1, a2);
- // CHECK: icmp ule <2 x i64> %a1, %a2
-}
-
-uint64x2_t test_vcltq_s64(int64x2_t a1, int64x2_t a2) {
- // CHECK: test_vcltq_s64
- return vcltq_s64(a1, a2);
- // CHECK: icmp slt <2 x i64> %a1, %a2
-}
-
-uint64x2_t test_vcltq_u64(uint64x2_t a1, uint64x2_t a2) {
- // CHECK: test_vcltq_u64
- return vcltq_u64(a1, a2);
- // CHECK: icmp ult <2 x i64> %a1, %a2
-}
-
-int64x2_t test_vqabsq_s64(int64x2_t a1) {
- // CHECK: test_vqabsq_s64
- return vqabsq_s64(a1);
- // CHECK: llvm.aarch64.neon.sqabs.v2i64(<2 x i64> %a1)
- // CHECK-NEXT: ret
-}
Removed: cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vLdStNum_lane.c (removed)
@@ -1,141 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-// Test ARM64 SIMD lane-wise load and store intrinsics for N-element structures
-
-#include <arm_neon.h>
-
-int64x2x2_t test_vld2q_lane_s64(const void * a1, int64x2x2_t a2) {
- // CHECK: test_vld2q_lane_s64
- return vld2q_lane_s64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.ld2lane.v2i64.p0i8
-}
-
-uint64x2x2_t test_vld2q_lane_u64(const void * a1, uint64x2x2_t a2) {
- // CHECK: test_vld2q_lane_u64
- return vld2q_lane_u64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.ld2lane.v2i64.p0i8
-}
-
-int64x1x2_t test_vld2_lane_s64(const void * a1, int64x1x2_t a2) {
- // CHECK: test_vld2_lane_s64
- return vld2_lane_s64(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld2lane.v1i64.p0i8
-}
-
-uint64x1x2_t test_vld2_lane_u64(const void * a1, uint64x1x2_t a2) {
- // CHECK: test_vld2_lane_u64
- return vld2_lane_u64(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld2lane.v1i64.p0i8
-}
-
-poly8x16x2_t test_vld2q_lane_p8(const void * a1, poly8x16x2_t a2) {
- // CHECK: test_vld2q_lane_p8
- return vld2q_lane_p8(a1, a2, 0);
- // CHECK: extractvalue {{.*}} 0{{ *$}}
- // CHECK: extractvalue {{.*}} 1{{ *$}}
-}
-
-uint8x16x2_t test_vld2q_lane_u8(const void * a1, uint8x16x2_t a2) {
- // CHECK: test_vld2q_lane_u8
- return vld2q_lane_u8(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld2lane.v16i8.p0i8
-}
-
-int64x2x3_t test_vld3q_lane_s64(const void * a1, int64x2x3_t a2) {
- // CHECK: test_vld3q_lane_s64
- return vld3q_lane_s64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.ld3lane.v2i64.p0i8
-}
-
-uint64x2x3_t test_vld3q_lane_u64(const void * a1, uint64x2x3_t a2) {
- // CHECK: test_vld3q_lane_u64
- return vld3q_lane_u64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.ld3lane.v2i64.p0i8
-}
-
-int64x1x3_t test_vld3_lane_s64(const void * a1, int64x1x3_t a2) {
- // CHECK: test_vld3_lane_s64
- return vld3_lane_s64(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld3lane.v1i64.p0i8
-}
-
-uint64x1x3_t test_vld3_lane_u64(const void * a1, uint64x1x3_t a2) {
- // CHECK: test_vld3_lane_u64
- return vld3_lane_u64(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld3lane.v1i64.p0i8
-}
-
-int8x8x3_t test_vld3_lane_s8(const void * a1, int8x8x3_t a2) {
- // CHECK: test_vld3_lane_s8
- return vld3_lane_s8(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld3lane.v8i8.p0i8
-}
-
-poly8x16x3_t test_vld3q_lane_p8(const void * a1, poly8x16x3_t a2) {
- // CHECK: test_vld3q_lane_p8
- return vld3q_lane_p8(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld3lane.v16i8.p0i8
-}
-
-uint8x16x3_t test_vld3q_lane_u8(const void * a1, uint8x16x3_t a2) {
- // CHECK: test_vld3q_lane_u8
- return vld3q_lane_u8(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld3lane.v16i8.p0i8
-}
-
-int64x2x4_t test_vld4q_lane_s64(const void * a1, int64x2x4_t a2) {
- // CHECK: test_vld4q_lane_s64
- return vld4q_lane_s64(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld4lane.v2i64.p0i8
-}
-
-uint64x2x4_t test_vld4q_lane_u64(const void * a1, uint64x2x4_t a2) {
- // CHECK: test_vld4q_lane_u64
- return vld4q_lane_u64(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld4lane.v2i64.p0i8
-}
-
-int64x1x4_t test_vld4_lane_s64(const void * a1, int64x1x4_t a2) {
- // CHECK: test_vld4_lane_s64
- return vld4_lane_s64(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld4lane.v1i64.p0i8
-}
-
-uint64x1x4_t test_vld4_lane_u64(const void * a1, uint64x1x4_t a2) {
- // CHECK: test_vld4_lane_u64
- return vld4_lane_u64(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld4lane.v1i64.p0i8
-}
-
-int8x8x4_t test_vld4_lane_s8(const void * a1, int8x8x4_t a2) {
- // CHECK: test_vld4_lane_s8
- return vld4_lane_s8(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld4lane.v8i8.p0i8
-}
-
-uint8x8x4_t test_vld4_lane_u8(const void * a1, uint8x8x4_t a2) {
- // CHECK: test_vld4_lane_u8
- return vld4_lane_u8(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld4lane.v8i8.p0i8
-}
-
-poly8x16x4_t test_vld4q_lane_p8(const void * a1, poly8x16x4_t a2) {
- // CHECK: test_vld4q_lane_p8
- return vld4q_lane_p8(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld4lane.v16i8.p0i8
-}
-
-int8x16x4_t test_vld4q_lane_s8(const void * a1, int8x16x4_t a2) {
- // CHECK: test_vld4q_lane_s8
- return vld4q_lane_s8(a1, a2, 0);
- // CHECK: extractvalue {{.*}} 0{{ *$}}
- // CHECK: extractvalue {{.*}} 1{{ *$}}
- // CHECK: extractvalue {{.*}} 2{{ *$}}
- // CHECK: extractvalue {{.*}} 3{{ *$}}
-}
-
-uint8x16x4_t test_vld4q_lane_u8(const void * a1, uint8x16x4_t a2) {
- // CHECK: test_vld4q_lane_u8
- return vld4q_lane_u8(a1, a2, 0);
- // CHECK: llvm.aarch64.neon.ld4lane.v16i8.p0i8
-}
-
Removed: cfe/trunk/test/CodeGen/arm64_vMaxMin.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vMaxMin.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vMaxMin.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vMaxMin.c (removed)
@@ -1,207 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - %s | FileCheck -check-prefix=CHECK-CODEGEN %s
-// REQUIRES: aarch64-registered-target
-// Test ARM64 SIMD max/min intrinsics
-
-#include <arm_neon.h>
-
-// Test a representative sample of 8 and 16, signed and unsigned, 64 and 128 bit reduction
-int8_t test_vmaxv_s8(int8x8_t a1) {
- // CHECK-LABEL: define i8 @test_vmaxv_s8(
- return vmaxv_s8(a1);
- // CHECK: call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(
-}
-
-uint16_t test_vminvq_u16(uint16x8_t a1) {
- // CHECK-LABEL: define i16 @test_vminvq_u16(
- return vminvq_u16(a1);
- // CHECK: call i32 @llvm.aarch64.neon.uminv.i32.v8i16(
-}
-
-// Test a representative sample of 8 and 16, signed and unsigned, 64 and 128 bit pairwise
-uint8x8_t test_vmin_u8(uint8x8_t a1, uint8x8_t a2) {
- // CHECK-LABEL: define <8 x i8> @test_vmin_u8(
- return vmin_u8(a1, a2);
- // CHECK: call <8 x i8> @llvm.aarch64.neon.umin.v8i8(
-}
-
-uint8x16_t test_vminq_u8(uint8x16_t a1, uint8x16_t a2) {
- // CHECK-LABEL: define <16 x i8> @test_vminq_u8(
- return vminq_u8(a1, a2);
- // CHECK: call <16 x i8> @llvm.aarch64.neon.umin.v16i8(
-}
-
-int16x8_t test_vmaxq_s16(int16x8_t a1, int16x8_t a2) {
- // CHECK-LABEL: define <8 x i16> @test_vmaxq_s16(
- return vmaxq_s16(a1, a2);
- // CHECK: call <8 x i16> @llvm.aarch64.neon.smax.v8i16(
-}
-
-// Test the more complicated cases of [suf]32 and f64
-float64x2_t test_vmaxq_f64(float64x2_t a1, float64x2_t a2) {
- // CHECK-LABEL: define <2 x double> @test_vmaxq_f64(
- return vmaxq_f64(a1, a2);
- // CHECK: call <2 x double> @llvm.aarch64.neon.fmax.v2f64(
-}
-
-float32x4_t test_vmaxq_f32(float32x4_t a1, float32x4_t a2) {
- // CHECK-LABEL: define <4 x float> @test_vmaxq_f32(
- return vmaxq_f32(a1, a2);
- // CHECK: call <4 x float> @llvm.aarch64.neon.fmax.v4f32(
-}
-
-float64x2_t test_vminq_f64(float64x2_t a1, float64x2_t a2) {
- // CHECK-LABEL: define <2 x double> @test_vminq_f64(
- return vminq_f64(a1, a2);
- // CHECK: call <2 x double> @llvm.aarch64.neon.fmin.v2f64(
-}
-
-float32x2_t test_vmax_f32(float32x2_t a1, float32x2_t a2) {
- // CHECK-LABEL: define <2 x float> @test_vmax_f32(
- return vmax_f32(a1, a2);
- // CHECK: call <2 x float> @llvm.aarch64.neon.fmax.v2f32(
-}
-
-int32x2_t test_vmax_s32(int32x2_t a1, int32x2_t a2) {
- // CHECK-LABEL: define <2 x i32> @test_vmax_s32(
- return vmax_s32(a1, a2);
- // CHECK: call <2 x i32> @llvm.aarch64.neon.smax.v2i32(
-}
-
-uint32x2_t test_vmin_u32(uint32x2_t a1, uint32x2_t a2) {
- // CHECK-LABEL: define <2 x i32> @test_vmin_u32(
- return vmin_u32(a1, a2);
- // CHECK: call <2 x i32> @llvm.aarch64.neon.umin.v2i32(
-}
-
-float32_t test_vmaxnmv_f32(float32x2_t a1) {
- // CHECK-LABEL: define float @test_vmaxnmv_f32(
- return vmaxnmv_f32(a1);
- // CHECK: llvm.aarch64.neon.fmaxnmv.f32.v2f32
- // CHECK-NEXT: ret
-}
-
-// this doesn't translate into a valid instruction, regardless of what the
-// ARM doc says.
-#if 0
-float64_t test_vmaxnmvq_f64(float64x2_t a1) {
- // CHECK@ test_vmaxnmvq_f64
- return vmaxnmvq_f64(a1);
- // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
- // CHECK-NEXT@ ret
-}
-#endif
-
-float32_t test_vmaxnmvq_f32(float32x4_t a1) {
- // CHECK-LABEL: define float @test_vmaxnmvq_f32(
- return vmaxnmvq_f32(a1);
- // CHECK: call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(
- // CHECK-NEXT: ret
-}
-
-float32_t test_vmaxv_f32(float32x2_t a1) {
- // CHECK-LABEL: define float @test_vmaxv_f32(
- return vmaxv_f32(a1);
- // CHECK: call float @llvm.aarch64.neon.fmaxv.f32.v2f32(
- // FIXME check that the 2nd and 3rd arguments are the same V register below
- // CHECK-CODEGEN: fmaxp.2s
- // CHECK-NEXT: ret
-}
-
-int32_t test_vmaxv_s32(int32x2_t a1) {
- // CHECK-LABEL: define i32 @test_vmaxv_s32(
- return vmaxv_s32(a1);
- // CHECK: call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(
- // FIXME check that the 2nd and 3rd arguments are the same V register below
- // CHECK-CODEGEN: smaxp.2s
- // CHECK-NEXT: ret
-}
-
-uint32_t test_vmaxv_u32(uint32x2_t a1) {
- // CHECK-LABEL: define i32 @test_vmaxv_u32(
- return vmaxv_u32(a1);
- // CHECK: call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(
- // FIXME check that the 2nd and 3rd arguments are the same V register below
- // CHECK-CODEGEN: umaxp.2s
- // CHECK-NEXT: ret
-}
-
-// FIXME punt on this for now; don't forget to fix CHECKs
-#if 0
-float64_t test_vmaxvq_f64(float64x2_t a1) {
- // CHECK@ test_vmaxvq_f64
- return vmaxvq_f64(a1);
- // CHECK@ llvm.aarch64.neon.fmaxv.i64.v2f64
- // CHECK-NEXT@ ret
-}
-#endif
-
-float32_t test_vmaxvq_f32(float32x4_t a1) {
- // CHECK-LABEL: define float @test_vmaxvq_f32(
- return vmaxvq_f32(a1);
- // CHECK: call float @llvm.aarch64.neon.fmaxv.f32.v4f32(
- // CHECK-NEXT: ret
-}
-
-float32_t test_vminnmv_f32(float32x2_t a1) {
- // CHECK-LABEL: define float @test_vminnmv_f32(
- return vminnmv_f32(a1);
- // CHECK: call float @llvm.aarch64.neon.fminnmv.f32.v2f32(
- // CHECK-NEXT: ret
-}
-
-float32_t test_vminvq_f32(float32x4_t a1) {
- // CHECK-LABEL: define float @test_vminvq_f32(
- return vminvq_f32(a1);
- // CHECK: call float @llvm.aarch64.neon.fminv.f32.v4f32(
- // CHECK-NEXT: ret
-}
-
-// this doesn't translate into a valid instruction, regardless of what the ARM
-// doc says.
-#if 0
-float64_t test_vminnmvq_f64(float64x2_t a1) {
- // CHECK@ test_vminnmvq_f64
- return vminnmvq_f64(a1);
- // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
- // CHECK-NEXT@ ret
-}
-#endif
-
-float32_t test_vminnmvq_f32(float32x4_t a1) {
- // CHECK-LABEL: define float @test_vminnmvq_f32(
- return vminnmvq_f32(a1);
- // CHECK: call float @llvm.aarch64.neon.fminnmv.f32.v4f32(
- // CHECK-NEXT: ret
-}
-
-float32_t test_vminv_f32(float32x2_t a1) {
- // CHECK-LABEL: define float @test_vminv_f32(
- return vminv_f32(a1);
- // CHECK: call float @llvm.aarch64.neon.fminv.f32.v2f32(
- // CHECK-NEXT: ret
-}
-
-int32_t test_vminv_s32(int32x2_t a1) {
- // CHECK-LABEL: define i32 @test_vminv_s32(
- return vminv_s32(a1);
- // CHECK: call i32 @llvm.aarch64.neon.sminv.i32.v2i32(
- // CHECK-CODEGEN: sminp.2s
- // CHECK-NEXT: ret
-}
-
-uint32_t test_vminv_u32(uint32x2_t a1) {
- // CHECK-LABEL: define i32 @test_vminv_u32(
- return vminv_u32(a1);
- // CHECK: call i32 @llvm.aarch64.neon.uminv.i32.v2i32(
-}
-
-// FIXME punt on this for now; don't forget to fix CHECKs
-#if 0
-float64_t test_vminvq_f64(float64x2_t a1) {
- // CHECK@ test_vminvq_f64
- return vminvq_f64(a1);
- // CHECK@ llvm.aarch64.neon.saddlv.i64.v2i32
- // CHECK-NEXT@ ret
-}
-#endif
Removed: cfe/trunk/test/CodeGen/arm64_vadd.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vadd.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vadd.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vadd.c (removed)
@@ -1,102 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-// Test ARM64 SIMD add intrinsics
-
-#include <arm_neon.h>
-int64_t test_vaddlv_s32(int32x2_t a1) {
- // CHECK: test_vaddlv_s32
- return vaddlv_s32(a1);
- // CHECK: llvm.aarch64.neon.saddlv.i64.v2i32
- // CHECK-NEXT: ret
-}
-
-uint64_t test_vaddlv_u32(uint32x2_t a1) {
- // CHECK: test_vaddlv_u32
- return vaddlv_u32(a1);
- // CHECK: llvm.aarch64.neon.uaddlv.i64.v2i32
- // CHECK-NEXT: ret
-}
-
-int8_t test_vaddv_s8(int8x8_t a1) {
- // CHECK: test_vaddv_s8
- return vaddv_s8(a1);
- // CHECK: llvm.aarch64.neon.saddv.i32.v8i8
- // don't check for return here (there's a trunc?)
-}
-
-int16_t test_vaddv_s16(int16x4_t a1) {
- // CHECK: test_vaddv_s16
- return vaddv_s16(a1);
- // CHECK: llvm.aarch64.neon.saddv.i32.v4i16
- // don't check for return here (there's a trunc?)
-}
-
-int32_t test_vaddv_s32(int32x2_t a1) {
- // CHECK: test_vaddv_s32
- return vaddv_s32(a1);
- // CHECK: llvm.aarch64.neon.saddv.i32.v2i32
- // CHECK-NEXT: ret
-}
-
-uint8_t test_vaddv_u8(int8x8_t a1) {
- // CHECK: test_vaddv_u8
- return vaddv_u8(a1);
- // CHECK: llvm.aarch64.neon.uaddv.i32.v8i8
- // don't check for return here (there's a trunc?)
-}
-
-uint16_t test_vaddv_u16(int16x4_t a1) {
- // CHECK: test_vaddv_u16
- return vaddv_u16(a1);
- // CHECK: llvm.aarch64.neon.uaddv.i32.v4i16
- // don't check for return here (there's a trunc?)
-}
-
-uint32_t test_vaddv_u32(int32x2_t a1) {
- // CHECK: test_vaddv_u32
- return vaddv_u32(a1);
- // CHECK: llvm.aarch64.neon.uaddv.i32.v2i32
- // CHECK-NEXT: ret
-}
-
-int8_t test_vaddvq_s8(int8x16_t a1) {
- // CHECK: test_vaddvq_s8
- return vaddvq_s8(a1);
- // CHECK: llvm.aarch64.neon.saddv.i32.v16i8
- // don't check for return here (there's a trunc?)
-}
-
-int16_t test_vaddvq_s16(int16x8_t a1) {
- // CHECK: test_vaddvq_s16
- return vaddvq_s16(a1);
- // CHECK: llvm.aarch64.neon.saddv.i32.v8i16
- // don't check for return here (there's a trunc?)
-}
-
-int32_t test_vaddvq_s32(int32x4_t a1) {
- // CHECK: test_vaddvq_s32
- return vaddvq_s32(a1);
- // CHECK: llvm.aarch64.neon.saddv.i32.v4i32
- // CHECK-NEXT: ret
-}
-
-uint8_t test_vaddvq_u8(int8x16_t a1) {
- // CHECK: test_vaddvq_u8
- return vaddvq_u8(a1);
- // CHECK: llvm.aarch64.neon.uaddv.i32.v16i8
- // don't check for return here (there's a trunc?)
-}
-
-uint16_t test_vaddvq_u16(int16x8_t a1) {
- // CHECK: test_vaddvq_u16
- return vaddvq_u16(a1);
- // CHECK: llvm.aarch64.neon.uaddv.i32.v8i16
- // don't check for return here (there's a trunc?)
-}
-
-uint32_t test_vaddvq_u32(int32x4_t a1) {
- // CHECK: test_vaddvq_u32
- return vaddvq_u32(a1);
- // CHECK: llvm.aarch64.neon.uaddv.i32.v4i32
- // CHECK-NEXT: ret
-}
-
Removed: cfe/trunk/test/CodeGen/arm64_vca.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vca.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vca.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vca.c (removed)
@@ -1,59 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-// Test ARM64 vector compare absolute intrinsics
-
-#include <arm_neon.h>
-
-uint32x2_t test_vcale_f32(float32x2_t a1, float32x2_t a2) {
- // CHECK: test_vcale_f32
- return vcale_f32(a1, a2);
- // CHECK: llvm.aarch64.neon.facge.v2i32.v2f32
- // no check for ret here, as there is a bitcast
-}
-
-uint32x4_t test_vcaleq_f32(float32x4_t a1, float32x4_t a2) {
- // CHECK: test_vcaleq_f32
- return vcaleq_f32(a1, a2);
- // CHECK: llvm.aarch64.neon.facge.v4i32.v4f32{{.*a2,.*a1}}
- // no check for ret here, as there is a bitcast
-}
-
-uint32x2_t test_vcalt_f32(float32x2_t a1, float32x2_t a2) {
- // CHECK: test_vcalt_f32
- return vcalt_f32(a1, a2);
- // CHECK: llvm.aarch64.neon.facgt.v2i32.v2f32{{.*a2,.*a1}}
- // no check for ret here, as there is a bitcast
-}
-
-uint32x4_t test_vcaltq_f32(float32x4_t a1, float32x4_t a2) {
- // CHECK: test_vcaltq_f32
- return vcaltq_f32(a1, a2);
- // CHECK: llvm.aarch64.neon.facgt.v4i32.v4f32{{.*a2,.*a1}}
-}
-
-uint64x2_t test_vcagtq_f64(float64x2_t a1, float64x2_t a2) {
- // CHECK: test_vcagtq_f64
- return vcagtq_f64(a1, a2);
- // CHECK: llvm.aarch64.neon.facgt.v2i64.v2f64{{.*a1,.*a2}}
- // no check for ret here, as there is a bitcast
-}
-
-uint64x2_t test_vcaltq_f64(float64x2_t a1, float64x2_t a2) {
- // CHECK: test_vcaltq_f64
- return vcaltq_f64(a1, a2);
- // CHECK: llvm.aarch64.neon.facgt.v2i64.v2f64{{.*a2,.*a1}}
- // no check for ret here, as there is a bitcast
-}
-
-uint64x2_t test_vcageq_f64(float64x2_t a1, float64x2_t a2) {
- // CHECK: test_vcageq_f64
- return vcageq_f64(a1, a2);
- // CHECK: llvm.aarch64.neon.facge.v2i64.v2f64{{.*a1,.*a2}}
- // no check for ret here, as there is a bitcast
-}
-
-uint64x2_t test_vcaleq_f64(float64x2_t a1, float64x2_t a2) {
- // CHECK: test_vcaleq_f64
- return vcaleq_f64(a1, a2);
- // CHECK: llvm.aarch64.neon.facge.v2i64.v2f64{{.*a2,.*a1}}
- // no check for ret here, as there is a bitcast
-}
Removed: cfe/trunk/test/CodeGen/arm64_vcvtfp.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vcvtfp.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vcvtfp.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vcvtfp.c (removed)
@@ -1,48 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-
-#include <arm_neon.h>
-
-float64x2_t test_vcvt_f64_f32(float32x2_t x) {
- // CHECK-LABEL: test_vcvt_f64_f32
- return vcvt_f64_f32(x);
- // CHECK: fpext <2 x float> {{%.*}} to <2 x double>
- // CHECK-NEXT: ret
-}
-
-float64x2_t test_vcvt_high_f64_f32(float32x4_t x) {
- // CHECK-LABEL: test_vcvt_high_f64_f32
- return vcvt_high_f64_f32(x);
- // CHECK: [[HIGH:%.*]] = shufflevector <4 x float> {{%.*}}, <4 x float> undef, <2 x i32> <i32 2, i32 3>
- // CHECK-NEXT: fpext <2 x float> [[HIGH]] to <2 x double>
- // CHECK-NEXT: ret
-}
-
-float32x2_t test_vcvt_f32_f64(float64x2_t v) {
- // CHECK: test_vcvt_f32_f64
- return vcvt_f32_f64(v);
- // CHECK: fptrunc <2 x double> {{%.*}} to <2 x float>
- // CHECK-NEXT: ret
-}
-
-float32x4_t test_vcvt_high_f32_f64(float32x2_t x, float64x2_t v) {
- // CHECK: test_vcvt_high_f32_f64
- return vcvt_high_f32_f64(x, v);
- // CHECK: [[TRUNC:%.*]] = fptrunc <2 x double> {{.*}} to <2 x float>
- // CHECK-NEXT: shufflevector <2 x float> {{.*}}, <2 x float> [[TRUNC]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- // CHECK-NEXT: ret
-}
-
-float32x2_t test_vcvtx_f32_f64(float64x2_t v) {
- // CHECK: test_vcvtx_f32_f64
- return vcvtx_f32_f64(v);
- // CHECK: llvm.aarch64.neon.fcvtxn.v2f32.v2f64
- // CHECK-NEXT: ret
-}
-
-float32x4_t test_vcvtx_high_f32_f64(float32x2_t x, float64x2_t v) {
- // CHECK: test_vcvtx_high_f32_f64
- return vcvtx_high_f32_f64(x, v);
- // CHECK: llvm.aarch64.neon.fcvtxn.v2f32.v2f64
- // CHECK: shufflevector
- // CHECK: ret
-}
Removed: cfe/trunk/test/CodeGen/arm64_vecCmpBr.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vecCmpBr.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vecCmpBr.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vecCmpBr.c (removed)
@@ -1,111 +0,0 @@
-// RUN: %clang_cc1 -O3 -triple arm64-apple-ios7 -target-feature +neon -S -ffreestanding %s -o - -target-cpu cyclone | FileCheck %s
-// REQUIRES: aarch64-registered-target
-// test code generation for <rdar://problem/11487757>
-#include <arm_neon.h>
-
-unsigned bar();
-
-// Branch if any lane of V0 is zero; 64 bit => !min
-unsigned anyZero64(uint16x4_t a) {
-// CHECK: anyZero64:
-// CHECK: uminv.8b b[[REGNO1:[0-9]+]], v0
-// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
-// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
-// CHECK: [[LABEL]]:
-// CHECK-NEXT: b {{_bar|bar}}
- if (!vminv_u8(a))
- return bar();
- return 0;
-}
-
-// Branch if any lane of V0 is zero; 128 bit => !min
-unsigned anyZero128(uint16x8_t a) {
-// CHECK: anyZero128:
-// CHECK: uminv.16b b[[REGNO1:[0-9]+]], v0
-// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
-// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
-// CHECK: [[LABEL]]:
-// CHECK-NEXT: b {{_bar|bar}}
- if (!vminvq_u8(a))
- return bar();
- return 0;
-}
-
-// Branch if any lane of V0 is non-zero; 64 bit => max
-unsigned anyNonZero64(uint16x4_t a) {
-// CHECK: anyNonZero64:
-// CHECK: umaxv.8b b[[REGNO1:[0-9]+]], v0
-// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
-// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
-// CHECK: [[LABEL]]:
-// CHECK-NEXT: movz w0, #0
- if (vmaxv_u8(a))
- return bar();
- return 0;
-}
-
-// Branch if any lane of V0 is non-zero; 128 bit => max
-unsigned anyNonZero128(uint16x8_t a) {
-// CHECK: anyNonZero128:
-// CHECK: umaxv.16b b[[REGNO1:[0-9]+]], v0
-// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
-// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
-// CHECK: [[LABEL]]:
-// CHECK-NEXT: movz w0, #0
- if (vmaxvq_u8(a))
- return bar();
- return 0;
-}
-
-// Branch if all lanes of V0 are zero; 64 bit => !max
-unsigned allZero64(uint16x4_t a) {
-// CHECK: allZero64:
-// CHECK: umaxv.8b b[[REGNO1:[0-9]+]], v0
-// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
-// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
-// CHECK: [[LABEL]]:
-// CHECK-NEXT: b {{_bar|bar}}
- if (!vmaxv_u8(a))
- return bar();
- return 0;
-}
-
-// Branch if all lanes of V0 are zero; 128 bit => !max
-unsigned allZero128(uint16x8_t a) {
-// CHECK: allZero128:
-// CHECK: umaxv.16b b[[REGNO1:[0-9]+]], v0
-// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
-// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
-// CHECK: [[LABEL]]:
-// CHECK-NEXT: b {{_bar|bar}}
- if (!vmaxvq_u8(a))
- return bar();
- return 0;
-}
-
-// Branch if all lanes of V0 are non-zero; 64 bit => min
-unsigned allNonZero64(uint16x4_t a) {
-// CHECK: allNonZero64:
-// CHECK: uminv.8b b[[REGNO1:[0-9]+]], v0
-// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
-// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
-// CHECK: [[LABEL]]:
-// CHECK-NEXT: movz w0, #0
- if (vminv_u8(a))
- return bar();
- return 0;
-}
-
-// Branch if all lanes of V0 are non-zero; 128 bit => min
-unsigned allNonZero128(uint16x8_t a) {
-// CHECK: allNonZero128:
-// CHECK: uminv.16b b[[REGNO1:[0-9]+]], v0
-// CHECK-NEXT: fmov w[[REGNO2:[0-9]+]], s[[REGNO1]]
-// CHECK-NEXT: cbz w[[REGNO2]], [[LABEL:[.A-Z_0-9]+]]
-// CHECK: [[LABEL]]:
-// CHECK-NEXT: movz w0, #0
- if (vminvq_u8(a))
- return bar();
- return 0;
-}
-
Removed: cfe/trunk/test/CodeGen/arm64_vext.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vext.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vext.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vext.c (removed)
@@ -1,239 +0,0 @@
-// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-
-// Test ARM64 extract intrinsics
-// This can also be used as a backend test by adding a RUN line that passes
-// -check-prefix=CHECK-CODEGEN to FileCheck.
-
-#include <arm_neon.h>
-
-void test_vext_s8()
-{
- // CHECK: test_vext_s8
- int8x8_t xS8x8;
- xS8x8 = vext_s8(xS8x8, xS8x8, 1);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_s8:
- // CHECK-CODEGEN: {{ext.8.*#1}}
-}
-
-void test_vext_u8()
-{
- // CHECK: test_vext_u8
- uint8x8_t xU8x8;
- xU8x8 = vext_u8(xU8x8, xU8x8, 2);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_u8:
- // CHECK-CODEGEN: {{ext.8.*#2}}
-}
-
-void test_vext_p8()
-{
- // CHECK: test_vext_p8
- poly8x8_t xP8x8;
- xP8x8 = vext_p8(xP8x8, xP8x8, 3);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_p8:
- // CHECK-CODEGEN: {{ext.8.*#3}}
-}
-
-void test_vext_s16()
-{
- // CHECK: test_vext_s16
- int16x4_t xS16x4;
- xS16x4 = vext_s16(xS16x4, xS16x4, 1);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_s16:
- // CHECK-CODEGEN: {{ext.8.*#2}}
-}
-
-void test_vext_u16()
-{
- // CHECK: test_vext_u16
- uint16x4_t xU16x4;
- xU16x4 = vext_u16(xU16x4, xU16x4, 2);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_u16:
- // CHECK-CODEGEN: {{ext.8.*#4}}
-}
-
-void test_vext_p16()
-{
- // CHECK: test_vext_p16
- poly16x4_t xP16x4;
- xP16x4 = vext_p16(xP16x4, xP16x4, 3);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_p16:
- // CHECK-CODEGEN: {{ext.8.*#6}}
-}
-
-void test_vext_s32()
-{
- // CHECK: test_vext_s32
- int32x2_t xS32x2;
- xS32x2 = vext_s32(xS32x2, xS32x2, 1);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_s32:
- // CHECK-CODEGEN: {{ext.8.*#4}}
-}
-
-void test_vext_u32()
-{
- // CHECK: test_vext_u32
- uint32x2_t xU32x2;
- xU32x2 = vext_u32(xU32x2, xU32x2, 1);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_u32:
- // CHECK-CODEGEN: {{ext.8.*#4}}
-}
-
-void test_vext_f32()
-{
- // CHECK: test_vext_f32
- float32x2_t xF32x2;
- xF32x2 = vext_f32(xF32x2, xF32x2, 1);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_f32:
- // CHECK-CODEGEN: {{ext.8.*#4}}
-}
-
-void test_vext_s64()
-{
- // CHECK: test_vext_s64
- int64x1_t xS64x1;
- // FIXME don't use 1 as index or check for now, clang has a bug?
- xS64x1 = vext_s64(xS64x1, xS64x1, /*1*/0);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_s64:
- // CHECK_FIXME: {{ext.8.*#0}}
-}
-
-void test_vext_u64()
-{
- // CHECK: test_vext_u64
- uint64x1_t xU64x1;
- // FIXME don't use 1 as index or check for now, clang has a bug?
- xU64x1 = vext_u64(xU64x1, xU64x1, /*1*/0);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vext_u64:
- // CHECK_FIXME: {{ext.8.*#0}}
-}
-
-void test_vextq_s8()
-{
- // CHECK: test_vextq_s8
- int8x16_t xS8x16;
- xS8x16 = vextq_s8(xS8x16, xS8x16, 4);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_s8:
- // CHECK-CODEGEN: {{ext.16.*#4}}
-}
-
-void test_vextq_u8()
-{
- // CHECK: test_vextq_u8
- uint8x16_t xU8x16;
- xU8x16 = vextq_u8(xU8x16, xU8x16, 5);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_u8:
- // CHECK-CODEGEN: {{ext.16.*#5}}
-}
-
-void test_vextq_p8()
-{
- // CHECK: test_vextq_p8
- poly8x16_t xP8x16;
- xP8x16 = vextq_p8(xP8x16, xP8x16, 6);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_p8:
- // CHECK-CODEGEN: {{ext.16.*#6}}
-}
-
-void test_vextq_s16()
-{
- // CHECK: test_vextq_s16
- int16x8_t xS16x8;
- xS16x8 = vextq_s16(xS16x8, xS16x8, 7);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_s16:
- // CHECK-CODEGEN: {{ext.16.*#14}}
-}
-
-void test_vextq_u16()
-{
- // CHECK: test_vextq_u16
- uint16x8_t xU16x8;
- xU16x8 = vextq_u16(xU16x8, xU16x8, 4);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_u16:
- // CHECK-CODEGEN: {{ext.16.*#8}}
-}
-
-void test_vextq_p16()
-{
- // CHECK: test_vextq_p16
- poly16x8_t xP16x8;
- xP16x8 = vextq_p16(xP16x8, xP16x8, 5);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_p16:
- // CHECK-CODEGEN: {{ext.16.*#10}}
-}
-
-void test_vextq_s32()
-{
- // CHECK: test_vextq_s32
- int32x4_t xS32x4;
- xS32x4 = vextq_s32(xS32x4, xS32x4, 1);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_s32:
- // CHECK-CODEGEN: {{ext.16.*#4}}
-}
-
-void test_vextq_u32()
-{
- // CHECK: test_vextq_u32
- uint32x4_t xU32x4;
- xU32x4 = vextq_u32(xU32x4, xU32x4, 2);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_u32:
- // CHECK-CODEGEN: {{ext.16.*#8}}
-}
-
-void test_vextq_f32()
-{
- // CHECK: test_vextq_f32
- float32x4_t xF32x4;
- xF32x4 = vextq_f32(xF32x4, xF32x4, 3);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_f32:
- // CHECK-CODEGEN: {{ext.16.*#12}}
-}
-
-void test_vextq_s64()
-{
- // CHECK: test_vextq_s64
- int64x2_t xS64x2;
- xS64x2 = vextq_s64(xS64x2, xS64x2, 1);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_s64:
- // CHECK-CODEGEN: {{ext.16.*#8}}
-}
-
-void test_vextq_u64()
-{
- // CHECK: test_vextq_u64
- uint64x2_t xU64x2;
- xU64x2 = vextq_u64(xU64x2, xU64x2, 1);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_u64:
- // CHECK-CODEGEN: {{ext.16.*#8}}
-}
-
-void test_vextq_f64()
-{
- // CHECK: test_vextq_f64
- float64x2_t xF64x2;
- xF64x2 = vextq_f64(xF64x2, xF64x2, 1);
- // CHECK: shufflevector
- // CHECK-CODEGEN: test_vextq_f64:
- // CHECK-CODEGEN: {{ext.16.*#8}}
-}
Removed: cfe/trunk/test/CodeGen/arm64_vfma.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vfma.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vfma.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vfma.c (removed)
@@ -1,136 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-// Test ARM64 SIMD fused multiply add intrinsics
-
-#include <arm_neon.h>
-
-float32x2_t test_vfma_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) {
- // CHECK: test_vfma_f32
- return vfma_f32(a1, a2, a3);
- // CHECK: llvm.fma.v2f32({{.*a2, .*a3, .*a1}})
- // CHECK-NEXT: ret
-}
-
-float32x4_t test_vfmaq_f32(float32x4_t a1, float32x4_t a2, float32x4_t a3) {
- // CHECK: test_vfmaq_f32
- return vfmaq_f32(a1, a2, a3);
- // CHECK: llvm.fma.v4f32({{.*a2, .*a3, .*a1}})
- // CHECK-NEXT: ret
-}
-
-float64x2_t test_vfmaq_f64(float64x2_t a1, float64x2_t a2, float64x2_t a3) {
- // CHECK: test_vfmaq_f64
- return vfmaq_f64(a1, a2, a3);
- // CHECK: llvm.fma.v2f64({{.*a2, .*a3, .*a1}})
- // CHECK-NEXT: ret
-}
-
-float32x2_t test_vfma_lane_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) {
- // CHECK: test_vfma_lane_f32
- return vfma_lane_f32(a1, a2, a3, 1);
- // NB: the test below is deliberately loose, so that we don't depend too much
- // upon the exact IR used to select lane 1 (usually a shufflevector)
- // CHECK: llvm.fma.v2f32(<2 x float> %a2, <2 x float> {{.*}}, <2 x float> %a1)
- // CHECK-NEXT: ret
-}
-
-float32x4_t test_vfmaq_lane_f32(float32x4_t a1, float32x4_t a2, float32x2_t a3) {
- // CHECK: test_vfmaq_lane_f32
- return vfmaq_lane_f32(a1, a2, a3, 1);
- // NB: the test below is deliberately loose, so that we don't depend too much
- // upon the exact IR used to select lane 1 (usually a shufflevector)
- // CHECK: llvm.fma.v4f32(<4 x float> %a2, <4 x float> {{.*}}, <4 x float> %a1)
- // CHECK-NEXT: ret
-}
-
-float64x2_t test_vfmaq_lane_f64(float64x2_t a1, float64x2_t a2, float64x1_t a3) {
- // CHECK: test_vfmaq_lane_f64
- return vfmaq_lane_f64(a1, a2, a3, 0);
- // NB: the test below is deliberately loose, so that we don't depend too much
- // upon the exact IR used to select lane 1 (usually a shufflevector)
- // CHECK: llvm.fma.v2f64(<2 x double> %a2, <2 x double> {{.*}}, <2 x double> %a1)
- // CHECK-NEXT: ret
-}
-
-float32x2_t test_vfma_n_f32(float32x2_t a1, float32x2_t a2, float32_t a3) {
- // CHECK: test_vfma_n_f32
- return vfma_n_f32(a1, a2, a3);
- // NB: the test below is deliberately loose, so that we don't depend too much
- // upon the exact IR used to select lane 0 (usually two insertelements)
- // CHECK: llvm.fma.v2f32
- // CHECK-NEXT: ret
-}
-
-float32x4_t test_vfmaq_n_f32(float32x4_t a1, float32x4_t a2, float32_t a3) {
- // CHECK: test_vfmaq_n_f32
- return vfmaq_n_f32(a1, a2, a3);
- // NB: the test below is deliberately loose, so that we don't depend too much
- // upon the exact IR used to select lane 0 (usually four insertelements)
- // CHECK: llvm.fma.v4f32
- // CHECK-NEXT: ret
-}
-
-float64x2_t test_vfmaq_n_f64(float64x2_t a1, float64x2_t a2, float64_t a3) {
- // CHECK: test_vfmaq_n_f64
- return vfmaq_n_f64(a1, a2, a3);
- // NB: the test below is deliberately loose, so that we don't depend too much
- // upon the exact IR used to select lane 0 (usually two insertelements)
- // CHECK: llvm.fma.v2f64
- // CHECK-NEXT: ret
-}
-
-float32x2_t test_vfms_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) {
- // CHECK: test_vfms_f32
- return vfms_f32(a1, a2, a3);
- // CHECK: [[NEG:%.*]] = fsub <2 x float> {{.*}}, %a2
- // CHECK: llvm.fma.v2f32(<2 x float> %a3, <2 x float> [[NEG]], <2 x float> %a1)
- // CHECK-NEXT: ret
-}
-
-float32x4_t test_vfmsq_f32(float32x4_t a1, float32x4_t a2, float32x4_t a3) {
- // CHECK: test_vfmsq_f32
- return vfmsq_f32(a1, a2, a3);
- // CHECK: [[NEG:%.*]] = fsub <4 x float> {{.*}}, %a2
- // CHECK: llvm.fma.v4f32(<4 x float> %a3, <4 x float> [[NEG]], <4 x float> %a1)
- // CHECK-NEXT: ret
-}
-
-float64x2_t test_vfmsq_f64(float64x2_t a1, float64x2_t a2, float64x2_t a3) {
- // CHECK: test_vfmsq_f64
- return vfmsq_f64(a1, a2, a3);
- // CHECK: [[NEG:%.*]] = fsub <2 x double> {{.*}}, %a2
- // CHECK: llvm.fma.v2f64(<2 x double> %a3, <2 x double> [[NEG]], <2 x double> %a1)
- // CHECK-NEXT: ret
-}
-
-float32x2_t test_vfms_lane_f32(float32x2_t a1, float32x2_t a2, float32x2_t a3) {
- // CHECK: test_vfms_lane_f32
- return vfms_lane_f32(a1, a2, a3, 1);
- // NB: the test below is deliberately loose, so that we don't depend too much
- // upon the exact IR used to select lane 1 (usually a shufflevector)
- // CHECK: [[NEG:%.*]] = fsub <2 x float> {{.*}}, %a3
- // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[NEG]]
- // CHECK: llvm.fma.v2f32(<2 x float> {{.*}}, <2 x float> [[LANE]], <2 x float> %a1)
- // CHECK-NEXT: ret
-}
-
-float32x4_t test_vfmsq_lane_f32(float32x4_t a1, float32x4_t a2, float32x2_t a3) {
- // CHECK: test_vfmsq_lane_f32
- return vfmsq_lane_f32(a1, a2, a3, 1);
- // NB: the test below is deliberately loose, so that we don't depend too much
- // upon the exact IR used to select lane 1 (usually a shufflevector)
- // CHECK: [[NEG:%.*]] = fsub <2 x float> {{.*}}, %a3
- // CHECK: [[LANE:%.*]] = shufflevector <2 x float> [[NEG]]
- // CHECK: llvm.fma.v4f32(<4 x float> {{.*}}, <4 x float> [[LANE]], <4 x float> %a1)
- // CHECK-NEXT: ret
-}
-
-float64x2_t test_vfmsq_lane_f64(float64x2_t a1, float64x2_t a2, float64x1_t a3) {
- // CHECK: test_vfmsq_lane_f64
- return vfmsq_lane_f64(a1, a2, a3, 0);
- // NB: the test below is deliberately loose, so that we don't depend too much
- // upon the exact IR used to select lane 1 (usually a shufflevector)
- // CHECK: [[NEG:%.*]] = fsub <1 x double> {{.*}}, %a3
- // CHECK: [[LANE:%.*]] = shufflevector <1 x double> [[NEG]]
- // CHECK: llvm.fma.v2f64(<2 x double> {{.*}}, <2 x double> [[LANE]], <2 x double> %a1)
- // CHECK-NEXT: ret
-}
Removed: cfe/trunk/test/CodeGen/arm64_vneg.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vneg.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vneg.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vneg.c (removed)
@@ -1,18 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-// Test ARM64 SIMD negate and saturating negate intrinsics
-
-#include <arm_neon.h>
-
-int64x2_t test_vnegq_s64(int64x2_t a1) {
- // CHECK: test_vnegq_s64
- return vnegq_s64(a1);
- // CHECK: sub <2 x i64> zeroinitializer, %a1
- // CHECK-NEXT: ret
-}
-
-int64x2_t test_vqnegq_s64(int64x2_t a1) {
- // CHECK: test_vqnegq_s64
- return vqnegq_s64(a1);
- // CHECK: llvm.aarch64.neon.sqneg.v2i64
- // CHECK-NEXT: ret
-}
Removed: cfe/trunk/test/CodeGen/arm64_vqmov.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vqmov.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vqmov.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vqmov.c (removed)
@@ -1,77 +0,0 @@
-// RUN: %clang_cc1 -O3 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - %s | FileCheck %s
-// REQUIRES: aarch64-registered-target
-/// Test vqmov[u]n_high_<su>{16,32,64} ARM64 intrinsics
-
-#include <arm_neon.h>
-
-// vqmovn_high_s16 -> SQXTN2 Vd.16b,Vn.8h
-int8x16_t test_vqmovn_high_s16(int8x8_t Vdlow, int16x8_t Vn)
-{
- return vqmovn_high_s16(Vdlow, Vn);
- // CHECK: test_vqmovn_high_s16:
- // CHECK: sqxtn2.16b {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
-}
-
-// vqmovun_high_s16 -> SQXTUN2 Vd.16b,Vn.8h
-uint8x16_t test_vqmovun_high_s16(uint8x8_t Vdlow, uint16x8_t Vn)
-{
- return vqmovun_high_s16(Vdlow, Vn);
- // CHECK: test_vqmovun_high_s16:
- // CHECK: sqxtun2.16b {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
-}
-
-// vqmovn_high_s32 -> SQXTN2 Vd.8h,Vn.4s
-int16x8_t test_vqmovn_high_s32(int16x4_t Vdlow, int32x4_t Vn)
-{
- return vqmovn_high_s32(Vdlow, Vn);
- // CHECK: test_vqmovn_high_s32:
- // CHECK: sqxtn2.8h {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
-}
-
-// vqmovn_high_u32 -> UQXTN2 Vd.8h,Vn.4s
-uint16x8_t test_vqmovn_high_u32(uint16x4_t Vdlow, uint32x4_t Vn)
-{
- return vqmovn_high_u32(Vdlow, Vn);
- // CHECK: test_vqmovn_high_u32:
- // CHECK: uqxtn2.8h {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
-}
-
-// vqmovn_high_s64 -> SQXTN2 Vd.4s,Vn.2d
-int32x4_t test_vqmovn_high_s64(int32x2_t Vdlow, int64x2_t Vn)
-{
- return vqmovn_high_s64(Vdlow, Vn);
- // CHECK: test_vqmovn_high_s64:
- // CHECK: sqxtn2.4s {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
-}
-
-// vqmovn_high_u64 -> UQXTN2 Vd.4s,Vn.2d
-uint32x4_t test_vqmovn_high_u64(uint32x2_t Vdlow, uint64x2_t Vn)
-{
- return vqmovn_high_u64(Vdlow, Vn);
- // CHECK: test_vqmovn_high_u64:
- // CHECK: uqxtn2.4s {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
-}
-
-// vqmovn_high_u16 -> UQXTN2 Vd.16b,Vn.8h
-uint8x16_t test_vqmovn_high_u16(uint8x8_t Vdlow, uint16x8_t Vn)
-{
- return vqmovn_high_u16(Vdlow, Vn);
- // CHECK: test_vqmovn_high_u16:
- // CHECK: uqxtn2.16b {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
-}
-
-// vqmovun_high_s32 -> SQXTUN2 Vd.8h,Vn.4s
-uint16x8_t test_vqmovun_high_s32(uint16x4_t Vdlow, uint32x4_t Vn)
-{
- return vqmovun_high_s32(Vdlow, Vn);
- // CHECK: test_vqmovun_high_s32:
- // CHECK: sqxtun2.8h {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
-}
-
-// vqmovun_high_s64 -> SQXTUN2 Vd.4s,Vn.2d
-uint32x4_t test_vqmovun_high_s64(uint32x2_t Vdlow, uint64x2_t Vn)
-{
- return vqmovun_high_s64(Vdlow, Vn);
- // CHECK: test_vqmovun_high_s64:
- // CHECK: sqxtun2.4s {{v[0-9][0-9]*}}, {{v[0-9][0-9]*}}
-}
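All of the _high narrowing forms above follow one pattern; a short illustrative example (function name is mine, not from the tree):

#include <arm_neon.h>

/* vqmovn_high_s16(lo, v) saturating-narrows the eight i16 lanes of v
   and packs them into the high half of the result, keeping 'lo' as the
   low half (SQXTN2 on AArch64). */
int8x16_t narrow_high_example(int8x8_t lo, int16x8_t v) {
  return vqmovn_high_s16(lo, v);
}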
Removed: cfe/trunk/test/CodeGen/arm64_vrecps.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vrecps.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vrecps.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vrecps.c (removed)
@@ -1,26 +0,0 @@
-// RUN: %clang_cc1 -O3 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - %s | FileCheck %s
-// REQUIRES: aarch64-registered-target
-/// Test vrecpss_f32, vrecpsd_f64 ARM64 intrinsics
-
-
-#include <arm_neon.h>
-
-// vrecpss_f32 -> FRECPS Sd,Sa,Sb
-//
-float32_t test_vrecpss_f32(float32_t Vdlow, float32_t Vn)
-{
- return vrecpss_f32(Vdlow, Vn);
- // CHECK: test_vrecpss_f32:
- // CHECK: frecps s0, s0, s1
- // CHECK-NEXT: ret
-}
-
-// vrecpsd_f64 -> FRECPS Dd,Da,Db
-//
-float64_t test_vrecpsd_f64(float64_t Vdlow, float64_t Vn)
-{
- return vrecpsd_f64(Vdlow, Vn);
- // CHECK: test_vrecpsd_f64:
- // CHECK: frecps d0, d0, d1
- // CHECK-NEXT: ret
-}
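FRECPS is normally one piece of a Newton-Raphson refinement, which the removed test did not show; a hedged usage sketch (assuming vrecpes_f32 for the initial estimate):

#include <arm_neon.h>

/* One Newton-Raphson step for 1.0f / a: frecps computes 2 - a*x, so
   x * vrecpss_f32(a, x) sharpens the estimate x. */
float recip_refined(float a) {
  float x = vrecpes_f32(a);      /* initial reciprocal estimate */
  return x * vrecpss_f32(a, x);  /* one refinement step */
}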
Removed: cfe/trunk/test/CodeGen/arm64_vshift.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vshift.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vshift.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vshift.c (removed)
@@ -1,357 +0,0 @@
-// RUN: %clang_cc1 -triple arm64-apple-ios7.0 -target-feature +neon -ffreestanding -emit-llvm -o - -O1 %s | FileCheck %s
-#include <arm_neon.h>
-
-int8x8_t test_vqshl_n_s8(int8x8_t in) {
- // CHECK-LABEL: @test_vqshl_n_s8
- // CHECK: call <8 x i8> @llvm.aarch64.neon.sqshl.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
- return vqshl_n_s8(in, 1);
-}
-
-int16x4_t test_vqshl_n_s16(int16x4_t in) {
- // CHECK-LABEL: @test_vqshl_n_s16
- // CHECK: call <4 x i16> @llvm.aarch64.neon.sqshl.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
- return vqshl_n_s16(in, 1);
-}
-
-int32x2_t test_vqshl_n_s32(int32x2_t in) {
- // CHECK-LABEL: @test_vqshl_n_s32
- // CHECK: call <2 x i32> @llvm.aarch64.neon.sqshl.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
- return vqshl_n_s32(in, 1);
-}
-
-int64x1_t test_vqshl_n_s64(int64x1_t in) {
- // CHECK-LABEL: @test_vqshl_n_s64
- // CHECK: call <1 x i64> @llvm.aarch64.neon.sqshl.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
- return vqshl_n_s64(in, 1);
-}
-
-
-int8x16_t test_vqshlq_n_s8(int8x16_t in) {
- // CHECK-LABEL: @test_vqshlq_n_s8
- // CHECK: call <16 x i8> @llvm.aarch64.neon.sqshl.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
- return vqshlq_n_s8(in, 1);
-}
-
-int16x8_t test_vqshlq_n_s16(int16x8_t in) {
- // CHECK-LABEL: @test_vqshlq_n_s16
- // CHECK: call <8 x i16> @llvm.aarch64.neon.sqshl.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
- return vqshlq_n_s16(in, 1);
-}
-
-int32x4_t test_vqshlq_n_s32(int32x4_t in) {
- // CHECK-LABEL: @test_vqshlq_n_s32
- // CHECK: call <4 x i32> @llvm.aarch64.neon.sqshl.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
- return vqshlq_n_s32(in, 1);
-}
-
-int64x2_t test_vqshlq_n_s64(int64x2_t in) {
- // CHECK-LABEL: @test_vqshlq_n_s64
- // CHECK: call <2 x i64> @llvm.aarch64.neon.sqshl.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>
- return vqshlq_n_s64(in, 1);
-}
-
-uint8x8_t test_vqshl_n_u8(uint8x8_t in) {
- // CHECK-LABEL: @test_vqshl_n_u8
- // CHECK: call <8 x i8> @llvm.aarch64.neon.uqshl.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
- return vqshl_n_u8(in, 1);
-}
-
-uint16x4_t test_vqshl_n_u16(uint16x4_t in) {
- // CHECK-LABEL: @test_vqshl_n_u16
- // CHECK: call <4 x i16> @llvm.aarch64.neon.uqshl.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
- return vqshl_n_u16(in, 1);
-}
-
-uint32x2_t test_vqshl_n_u32(uint32x2_t in) {
- // CHECK-LABEL: @test_vqshl_n_u32
- // CHECK: call <2 x i32> @llvm.aarch64.neon.uqshl.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
- return vqshl_n_u32(in, 1);
-}
-
-uint64x1_t test_vqshl_n_u64(uint64x1_t in) {
- // CHECK-LABEL: @test_vqshl_n_u64
- // CHECK: call <1 x i64> @llvm.aarch64.neon.uqshl.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
- return vqshl_n_u64(in, 1);
-}
-
-uint8x16_t test_vqshlq_n_u8(uint8x16_t in) {
- // CHECK-LABEL: @test_vqshlq_n_u8
- // CHECK: call <16 x i8> @llvm.aarch64.neon.uqshl.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
- return vqshlq_n_u8(in, 1);
-}
-
-uint16x8_t test_vqshlq_n_u16(uint16x8_t in) {
- // CHECK-LABEL: @test_vqshlq_n_u16
- // CHECK: call <8 x i16> @llvm.aarch64.neon.uqshl.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
- return vqshlq_n_u16(in, 1);
-}
-
-uint32x4_t test_vqshlq_n_u32(uint32x4_t in) {
- // CHECK-LABEL: @test_vqshlq_n_u32
- // CHECK: call <4 x i32> @llvm.aarch64.neon.uqshl.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
- return vqshlq_n_u32(in, 1);
-}
-
-uint64x2_t test_vqshlq_n_u64(uint64x2_t in) {
- // CHECK-LABEL: @test_vqshlq_n_u64
- // CHECK: call <2 x i64> @llvm.aarch64.neon.uqshl.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>
- return vqshlq_n_u64(in, 1);
-}
-
-int8x8_t test_vrshr_n_s8(int8x8_t in) {
- // CHECK-LABEL: @test_vrshr_n_s8
- // CHECK: call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
- return vrshr_n_s8(in, 1);
-}
-
-int16x4_t test_vrshr_n_s16(int16x4_t in) {
- // CHECK-LABEL: @test_vrshr_n_s16
- // CHECK: call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
- return vrshr_n_s16(in, 1);
-}
-
-int32x2_t test_vrshr_n_s32(int32x2_t in) {
- // CHECK-LABEL: @test_vrshr_n_s32
- // CHECK: call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
- return vrshr_n_s32(in, 1);
-}
-
-int64x1_t test_vrshr_n_s64(int64x1_t in) {
- // CHECK-LABEL: @test_vrshr_n_s64
- // CHECK: call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
- return vrshr_n_s64(in, 1);
-}
-
-
-int8x16_t test_vrshrq_n_s8(int8x16_t in) {
- // CHECK-LABEL: @test_vrshrq_n_s8
- // CHECK: call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
- return vrshrq_n_s8(in, 1);
-}
-
-int16x8_t test_vrshrq_n_s16(int16x8_t in) {
- // CHECK-LABEL: @test_vrshrq_n_s16
- // CHECK: call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
- return vrshrq_n_s16(in, 1);
-}
-
-int32x4_t test_vrshrq_n_s32(int32x4_t in) {
- // CHECK-LABEL: @test_vrshrq_n_s32
- // CHECK: call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
- return vrshrq_n_s32(in, 1);
-}
-
-int64x2_t test_vrshrq_n_s64(int64x2_t in) {
- // CHECK-LABEL: @test_vrshrq_n_s64
- // CHECK: call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>
- return vrshrq_n_s64(in, 1);
-}
-
-uint8x8_t test_vrshr_n_u8(uint8x8_t in) {
- // CHECK-LABEL: @test_vrshr_n_u8
- // CHECK: call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
- return vrshr_n_u8(in, 1);
-}
-
-uint16x4_t test_vrshr_n_u16(uint16x4_t in) {
- // CHECK-LABEL: @test_vrshr_n_u16
- // CHECK: call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
- return vrshr_n_u16(in, 1);
-}
-
-uint32x2_t test_vrshr_n_u32(uint32x2_t in) {
- // CHECK-LABEL: @test_vrshr_n_u32
- // CHECK: call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
- return vrshr_n_u32(in, 1);
-}
-
-uint64x1_t test_vrshr_n_u64(uint64x1_t in) {
- // CHECK-LABEL: @test_vrshr_n_u64
- // CHECK: call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
- return vrshr_n_u64(in, 1);
-}
-
-uint8x16_t test_vrshrq_n_u8(uint8x16_t in) {
- // CHECK-LABEL: @test_vrshrq_n_u8
- // CHECK: call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
- return vrshrq_n_u8(in, 1);
-}
-
-uint16x8_t test_vrshrq_n_u16(uint16x8_t in) {
- // CHECK-LABEL: @test_vrshrq_n_u16
- // CHECK: call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
- return vrshrq_n_u16(in, 1);
-}
-
-uint32x4_t test_vrshrq_n_u32(uint32x4_t in) {
- // CHECK-LABEL: @test_vrshrq_n_u32
- // CHECK: call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
- return vrshrq_n_u32(in, 1);
-}
-
-uint64x2_t test_vrshrq_n_u64(uint64x2_t in) {
- // CHECK-LABEL: @test_vrshrq_n_u64
- // CHECK: call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>
- return vrshrq_n_u64(in, 1);
-}
-
-int8x8_t test_vqshlu_n_s8(int8x8_t in) {
- // CHECK-LABEL: @test_vqshlu_n_s8
- // CHECK: call <8 x i8> @llvm.aarch64.neon.sqshlu.v8i8(<8 x i8> %in, <8 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
- return vqshlu_n_s8(in, 1);
-}
-
-int16x4_t test_vqshlu_n_s16(int16x4_t in) {
- // CHECK-LABEL: @test_vqshlu_n_s16
- // CHECK: call <4 x i16> @llvm.aarch64.neon.sqshlu.v4i16(<4 x i16> %in, <4 x i16> <i16 1, i16 1, i16 1, i16 1>)
- return vqshlu_n_s16(in, 1);
-}
-
-int32x2_t test_vqshlu_n_s32(int32x2_t in) {
- // CHECK-LABEL: @test_vqshlu_n_s32
- // CHECK: call <2 x i32> @llvm.aarch64.neon.sqshlu.v2i32(<2 x i32> %in, <2 x i32> <i32 1, i32 1>)
- return vqshlu_n_s32(in, 1);
-}
-
-int64x1_t test_vqshlu_n_s64(int64x1_t in) {
- // CHECK-LABEL: @test_vqshlu_n_s64
- // CHECK: call <1 x i64> @llvm.aarch64.neon.sqshlu.v1i64(<1 x i64> %in, <1 x i64> <i64 1>)
- return vqshlu_n_s64(in, 1);
-}
-
-
-int8x16_t test_vqshluq_n_s8(int8x16_t in) {
- // CHECK-LABEL: @test_vqshluq_n_s8
- // CHECK: call <16 x i8> @llvm.aarch64.neon.sqshlu.v16i8(<16 x i8> %in, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
- return vqshluq_n_s8(in, 1);
-}
-
-int16x8_t test_vqshluq_n_s16(int16x8_t in) {
- // CHECK-LABEL: @test_vqshluq_n_s16
- // CHECK: call <8 x i16> @llvm.aarch64.neon.sqshlu.v8i16(<8 x i16> %in, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>)
- return vqshluq_n_s16(in, 1);
-}
-
-int32x4_t test_vqshluq_n_s32(int32x4_t in) {
- // CHECK-LABEL: @test_vqshluq_n_s32
- // CHECK: call <4 x i32> @llvm.aarch64.neon.sqshlu.v4i32(<4 x i32> %in, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
- return vqshluq_n_s32(in, 1);
-}
-
-int64x2_t test_vqshluq_n_s64(int64x2_t in) {
- // CHECK-LABEL: @test_vqshluq_n_s64
- // CHECK: call <2 x i64> @llvm.aarch64.neon.sqshlu.v2i64(<2 x i64> %in, <2 x i64> <i64 1, i64 1>
- return vqshluq_n_s64(in, 1);
-}
-
-int8x8_t test_vrsra_n_s8(int8x8_t acc, int8x8_t in) {
- // CHECK-LABEL: @test_vrsra_n_s8
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i8> @llvm.aarch64.neon.srshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
- // CHECK: add <8 x i8> [[TMP]], %acc
- return vrsra_n_s8(acc, in, 1);
-}
-
-int16x4_t test_vrsra_n_s16(int16x4_t acc, int16x4_t in) {
- // CHECK-LABEL: @test_vrsra_n_s16
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i16> @llvm.aarch64.neon.srshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
- // CHECK: add <4 x i16> [[TMP]], %acc
- return vrsra_n_s16(acc, in, 1);
-}
-
-int32x2_t test_vrsra_n_s32(int32x2_t acc, int32x2_t in) {
- // CHECK-LABEL: @test_vrsra_n_s32
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i32> @llvm.aarch64.neon.srshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
- // CHECK: add <2 x i32> [[TMP]], %acc
- return vrsra_n_s32(acc, in, 1);
-}
-
-int64x1_t test_vrsra_n_s64(int64x1_t acc, int64x1_t in) {
- // CHECK-LABEL: @test_vrsra_n_s64
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <1 x i64> @llvm.aarch64.neon.srshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
- // CHECK: add <1 x i64> [[TMP]], %acc
- return vrsra_n_s64(acc, in, 1);
-}
-
-int8x16_t test_vrsraq_n_s8(int8x16_t acc, int8x16_t in) {
- // CHECK-LABEL: @test_vrsraq_n_s8
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <16 x i8> @llvm.aarch64.neon.srshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
- // CHECK: add <16 x i8> [[TMP]], %acc
- return vrsraq_n_s8(acc, in, 1);
-}
-
-int16x8_t test_vrsraq_n_s16(int16x8_t acc, int16x8_t in) {
- // CHECK-LABEL: @test_vrsraq_n_s16
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i16> @llvm.aarch64.neon.srshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
- // CHECK: add <8 x i16> [[TMP]], %acc
- return vrsraq_n_s16(acc, in, 1);
-}
-
-int32x4_t test_vrsraq_n_s32(int32x4_t acc, int32x4_t in) {
- // CHECK-LABEL: @test_vrsraq_n_s32
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i32> @llvm.aarch64.neon.srshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
- // CHECK: add <4 x i32> [[TMP]], %acc
- return vrsraq_n_s32(acc, in, 1);
-}
-
-int64x2_t test_vrsraq_n_s64(int64x2_t acc, int64x2_t in) {
- // CHECK-LABEL: @test_vrsraq_n_s64
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i64> @llvm.aarch64.neon.srshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
- // CHECK: add <2 x i64> [[TMP]], %acc
- return vrsraq_n_s64(acc, in, 1);
-}
-
-uint8x8_t test_vrsra_n_u8(uint8x8_t acc, uint8x8_t in) {
- // CHECK-LABEL: @test_vrsra_n_u8
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i8> @llvm.aarch64.neon.urshl.v8i8(<8 x i8> %in, <8 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
- // CHECK: add <8 x i8> [[TMP]], %acc
- return vrsra_n_u8(acc, in, 1);
-}
-
-uint16x4_t test_vrsra_n_u16(uint16x4_t acc, uint16x4_t in) {
- // CHECK-LABEL: @test_vrsra_n_u16
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i16> @llvm.aarch64.neon.urshl.v4i16(<4 x i16> %in, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>)
- // CHECK: add <4 x i16> [[TMP]], %acc
- return vrsra_n_u16(acc, in, 1);
-}
-
-uint32x2_t test_vrsra_n_u32(uint32x2_t acc, uint32x2_t in) {
- // CHECK-LABEL: @test_vrsra_n_u32
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i32> @llvm.aarch64.neon.urshl.v2i32(<2 x i32> %in, <2 x i32> <i32 -1, i32 -1>)
- // CHECK: add <2 x i32> [[TMP]], %acc
- return vrsra_n_u32(acc, in, 1);
-}
-
-uint64x1_t test_vrsra_n_u64(uint64x1_t acc, uint64x1_t in) {
- // CHECK-LABEL: @test_vrsra_n_u64
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <1 x i64> @llvm.aarch64.neon.urshl.v1i64(<1 x i64> %in, <1 x i64> <i64 -1>)
- // CHECK: add <1 x i64> [[TMP]], %acc
- return vrsra_n_u64(acc, in, 1);
-}
-
-uint8x16_t test_vrsraq_n_u8(uint8x16_t acc, uint8x16_t in) {
- // CHECK-LABEL: @test_vrsraq_n_u8
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <16 x i8> @llvm.aarch64.neon.urshl.v16i8(<16 x i8> %in, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
- // CHECK: add <16 x i8> [[TMP]], %acc
- return vrsraq_n_u8(acc, in, 1);
-}
-
-uint16x8_t test_vrsraq_n_u16(uint16x8_t acc, uint16x8_t in) {
- // CHECK-LABEL: @test_vrsraq_n_u16
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <8 x i16> @llvm.aarch64.neon.urshl.v8i16(<8 x i16> %in, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
- // CHECK: add <8 x i16> [[TMP]], %acc
- return vrsraq_n_u16(acc, in, 1);
-}
-
-uint32x4_t test_vrsraq_n_u32(uint32x4_t acc, uint32x4_t in) {
- // CHECK-LABEL: @test_vrsraq_n_u32
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <4 x i32> @llvm.aarch64.neon.urshl.v4i32(<4 x i32> %in, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
- // CHECK: add <4 x i32> [[TMP]], %acc
- return vrsraq_n_u32(acc, in, 1);
-}
-
-uint64x2_t test_vrsraq_n_u64(uint64x2_t acc, uint64x2_t in) {
- // CHECK-LABEL: @test_vrsraq_n_u64
- // CHECK: [[TMP:%[0-9a-zA-Z._]+]] = tail call <2 x i64> @llvm.aarch64.neon.urshl.v2i64(<2 x i64> %in, <2 x i64> <i64 -1, i64 -1>)
- // CHECK: add <2 x i64> [[TMP]], %acc
- return vrsraq_n_u64(acc, in, 1);
-}
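The rounding-shift-and-accumulate checks are the least obvious part of this file; a sketch of the semantics they pin down (illustrative only, not from the tree):

#include <arm_neon.h>

/* vrshr_n rounds to nearest before shifting, which is why the IR above
   uses (s|u)rshl with a splat of -n; vrsra_n is the same followed by an
   add into the accumulator. */
int8x8_t rshr_then_accumulate(int8x8_t acc, int8x8_t in) {
  int8x8_t rounded = vrshr_n_s8(in, 1); /* roughly (in + 1) >> 1 per lane */
  return vadd_s8(acc, rounded);         /* same result as vrsra_n_s8(acc, in, 1) */
}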
Removed: cfe/trunk/test/CodeGen/arm64_vsli.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vsli.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vsli.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vsli.c (removed)
@@ -1,148 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - %s | \
-// RUN: FileCheck -check-prefix=CHECK_CODEGEN %s
-// REQUIRES: aarch64-registered-target
-// Test ARM64 SIMD vector shift left and insert: vsli[q]_n_*
-
-#include <arm_neon.h>
-
-int8x8_t test_vsli_n_s8(int8x8_t a1, int8x8_t a2) {
- // CHECK: test_vsli_n_s8
- return vsli_n_s8(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsli.v8i8
- // CHECK_CODEGEN: sli.8b v0, v1, #3
-}
-
-int16x4_t test_vsli_n_s16(int16x4_t a1, int16x4_t a2) {
- // CHECK: test_vsli_n_s16
- return vsli_n_s16(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsli.v4i16
- // CHECK_CODEGEN: sli.4h v0, v1, #3
-}
-
-int32x2_t test_vsli_n_s32(int32x2_t a1, int32x2_t a2) {
- // CHECK: test_vsli_n_s32
- return vsli_n_s32(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v2i32
- // CHECK_CODEGEN: sli.2s v0, v1, #1
-}
-
-int64x1_t test_vsli_n_s64(int64x1_t a1, int64x1_t a2) {
- // CHECK: test_vsli_n_s64
- return vsli_n_s64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v1i64
- // CHECK_CODEGEN: sli d0, d1, #1
-}
-
-uint8x8_t test_vsli_n_u8(uint8x8_t a1, uint8x8_t a2) {
- // CHECK: test_vsli_n_u8
- return vsli_n_u8(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsli.v8i8
- // CHECK_CODEGEN: sli.8b v0, v1, #3
-}
-
-uint16x4_t test_vsli_n_u16(uint16x4_t a1, uint16x4_t a2) {
- // CHECK: test_vsli_n_u16
- return vsli_n_u16(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsli.v4i16
- // CHECK_CODEGEN: sli.4h v0, v1, #3
-}
-
-uint32x2_t test_vsli_n_u32(uint32x2_t a1, uint32x2_t a2) {
- // CHECK: test_vsli_n_u32
- return vsli_n_u32(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v2i32
- // CHECK_CODEGEN: sli.2s v0, v1, #1
-}
-
-uint64x1_t test_vsli_n_u64(uint64x1_t a1, uint64x1_t a2) {
- // CHECK: test_vsli_n_u64
- return vsli_n_u64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v1i64
- // CHECK_CODEGEN: sli d0, d1, #1
-}
-
-poly8x8_t test_vsli_n_p8(poly8x8_t a1, poly8x8_t a2) {
- // CHECK: test_vsli_n_p8
- return vsli_n_p8(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v8i8
- // CHECK_CODEGEN: sli.8b v0, v1, #1
-}
-
-poly16x4_t test_vsli_n_p16(poly16x4_t a1, poly16x4_t a2) {
- // CHECK: test_vsli_n_p16
- return vsli_n_p16(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v4i16
- // CHECK_CODEGEN: sli.4h v0, v1, #1
-}
-
-int8x16_t test_vsliq_n_s8(int8x16_t a1, int8x16_t a2) {
- // CHECK: test_vsliq_n_s8
- return vsliq_n_s8(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsli.v16i8
- // CHECK_CODEGEN: sli.16b v0, v1, #3
-}
-
-int16x8_t test_vsliq_n_s16(int16x8_t a1, int16x8_t a2) {
- // CHECK: test_vsliq_n_s16
- return vsliq_n_s16(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsli.v8i16
- // CHECK_CODEGEN: sli.8h v0, v1, #3
-}
-
-int32x4_t test_vsliq_n_s32(int32x4_t a1, int32x4_t a2) {
- // CHECK: test_vsliq_n_s32
- return vsliq_n_s32(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v4i32
- // CHECK_CODEGEN: sli.4s v0, v1, #1
-}
-
-int64x2_t test_vsliq_n_s64(int64x2_t a1, int64x2_t a2) {
- // CHECK: test_vsliq_n_s64
- return vsliq_n_s64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v2i64
- // CHECK_CODEGEN: sli.2d v0, v1, #1
-}
-
-uint8x16_t test_vsliq_n_u8(uint8x16_t a1, uint8x16_t a2) {
- // CHECK: test_vsliq_n_u8
- return vsliq_n_u8(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsli.v16i8
- // CHECK_CODEGEN: sli.16b v0, v1, #3
-}
-
-uint16x8_t test_vsliq_n_u16(uint16x8_t a1, uint16x8_t a2) {
- // CHECK: test_vsliq_n_u16
- return vsliq_n_u16(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsli.v8i16
- // CHECK_CODEGEN: sli.8h v0, v1, #3
-}
-
-uint32x4_t test_vsliq_n_u32(uint32x4_t a1, uint32x4_t a2) {
- // CHECK: test_vsliq_n_u32
- return vsliq_n_u32(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v4i32
- // CHECK_CODEGEN: sli.4s v0, v1, #1
-}
-
-uint64x2_t test_vsliq_n_u64(uint64x2_t a1, uint64x2_t a2) {
- // CHECK: test_vsliq_n_u64
- return vsliq_n_u64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v2i64
- // CHECK_CODEGEN: sli.2d v0, v1, #1
-}
-
-poly8x16_t test_vsliq_n_p8(poly8x16_t a1, poly8x16_t a2) {
- // CHECK: test_vsliq_n_p8
- return vsliq_n_p8(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v16i8
- // CHECK_CODEGEN: sli.16b v0, v1, #1
-}
-
-poly16x8_t test_vsliq_n_p16(poly16x8_t a1, poly16x8_t a2) {
- // CHECK: test_vsliq_n_p16
- return vsliq_n_p16(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsli.v8i16
- // CHECK_CODEGEN: sli.8h v0, v1, #1
-}
-
Removed: cfe/trunk/test/CodeGen/arm64_vsri.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vsri.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vsri.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vsri.c (removed)
@@ -1,149 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - %s | \
-// RUN: FileCheck -check-prefix=CHECK_CODEGEN %s
-// REQUIRES: aarch64-registered-target
-
-// Test ARM64 SIMD vector shift right and insert: vsri[q]_n_*
-
-#include <arm_neon.h>
-
-int8x8_t test_vsri_n_s8(int8x8_t a1, int8x8_t a2) {
- // CHECK: test_vsri_n_s8
- return vsri_n_s8(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsri.v8i8
- // CHECK_CODEGEN: sri.8b v0, v1, #3
-}
-
-int16x4_t test_vsri_n_s16(int16x4_t a1, int16x4_t a2) {
- // CHECK: test_vsri_n_s16
- return vsri_n_s16(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsri.v4i16
- // CHECK_CODEGEN: sri.4h v0, v1, #3
-}
-
-int32x2_t test_vsri_n_s32(int32x2_t a1, int32x2_t a2) {
- // CHECK: test_vsri_n_s32
- return vsri_n_s32(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v2i32
- // CHECK_CODEGEN: sri.2s v0, v1, #1
-}
-
-int64x1_t test_vsri_n_s64(int64x1_t a1, int64x1_t a2) {
- // CHECK: test_vsri_n_s64
- return vsri_n_s64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v1i64
- // CHECK_CODEGEN: sri d0, d1, #1
-}
-
-uint8x8_t test_vsri_n_u8(uint8x8_t a1, uint8x8_t a2) {
- // CHECK: test_vsri_n_u8
- return vsri_n_u8(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsri.v8i8
- // CHECK_CODEGEN: sri.8b v0, v1, #3
-}
-
-uint16x4_t test_vsri_n_u16(uint16x4_t a1, uint16x4_t a2) {
- // CHECK: test_vsri_n_u16
- return vsri_n_u16(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsri.v4i16
- // CHECK_CODEGEN: sri.4h v0, v1, #3
-}
-
-uint32x2_t test_vsri_n_u32(uint32x2_t a1, uint32x2_t a2) {
- // CHECK: test_vsri_n_u32
- return vsri_n_u32(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v2i32
- // CHECK_CODEGEN: sri.2s v0, v1, #1
-}
-
-uint64x1_t test_vsri_n_u64(uint64x1_t a1, uint64x1_t a2) {
- // CHECK: test_vsri_n_u64
- return vsri_n_u64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v1i64
- // CHECK_CODEGEN: sri d0, d1, #1
-}
-
-poly8x8_t test_vsri_n_p8(poly8x8_t a1, poly8x8_t a2) {
- // CHECK: test_vsri_n_p8
- return vsri_n_p8(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v8i8
- // CHECK_CODEGEN: sri.8b v0, v1, #1
-}
-
-poly16x4_t test_vsri_n_p16(poly16x4_t a1, poly16x4_t a2) {
- // CHECK: test_vsri_n_p16
- return vsri_n_p16(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v4i16
- // CHECK_CODEGEN: sri.4h v0, v1, #1
-}
-
-int8x16_t test_vsriq_n_s8(int8x16_t a1, int8x16_t a2) {
- // CHECK: test_vsriq_n_s8
- return vsriq_n_s8(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsri.v16i8
- // CHECK_CODEGEN: sri.16b v0, v1, #3
-}
-
-int16x8_t test_vsriq_n_s16(int16x8_t a1, int16x8_t a2) {
- // CHECK: test_vsriq_n_s16
- return vsriq_n_s16(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsri.v8i16
- // CHECK_CODEGEN: sri.8h v0, v1, #3
-}
-
-int32x4_t test_vsriq_n_s32(int32x4_t a1, int32x4_t a2) {
- // CHECK: test_vsriq_n_s32
- return vsriq_n_s32(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v4i32
- // CHECK_CODEGEN: sri.4s v0, v1, #1
-}
-
-int64x2_t test_vsriq_n_s64(int64x2_t a1, int64x2_t a2) {
- // CHECK: test_vsriq_n_s64
- return vsriq_n_s64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v2i64
- // CHECK_CODEGEN: sri.2d v0, v1, #1
-}
-
-uint8x16_t test_vsriq_n_u8(uint8x16_t a1, uint8x16_t a2) {
- // CHECK: test_vsriq_n_u8
- return vsriq_n_u8(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsri.v16i8
- // CHECK_CODEGEN: sri.16b v0, v1, #3
-}
-
-uint16x8_t test_vsriq_n_u16(uint16x8_t a1, uint16x8_t a2) {
- // CHECK: test_vsriq_n_u16
- return vsriq_n_u16(a1, a2, 3);
- // CHECK: llvm.aarch64.neon.vsri.v8i16
- // CHECK_CODEGEN: sri.8h v0, v1, #3
-}
-
-uint32x4_t test_vsriq_n_u32(uint32x4_t a1, uint32x4_t a2) {
- // CHECK: test_vsriq_n_u32
- return vsriq_n_u32(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v4i32
- // CHECK_CODEGEN: sri.4s v0, v1, #1
-}
-
-uint64x2_t test_vsriq_n_u64(uint64x2_t a1, uint64x2_t a2) {
- // CHECK: test_vsriq_n_u64
- return vsriq_n_u64(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v2i64
- // CHECK_CODEGEN: sri.2d v0, v1, #1
-}
-
-poly8x16_t test_vsriq_n_p8(poly8x16_t a1, poly8x16_t a2) {
- // CHECK: test_vsriq_n_p8
- return vsriq_n_p8(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v16i8
- // CHECK_CODEGEN: sri.16b v0, v1, #1
-}
-
-poly16x8_t test_vsriq_n_p16(poly16x8_t a1, poly16x8_t a2) {
- // CHECK: test_vsriq_n_p16
- return vsriq_n_p16(a1, a2, 1);
- // CHECK: llvm.aarch64.neon.vsri.v8i16
- // CHECK_CODEGEN: sri.8h v0, v1, #1
-}
-
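For anyone unfamiliar with the insert shifts, SLI and SRI keep part of the destination instead of zero-filling; a minimal sketch covering both (assumes nothing beyond <arm_neon.h>):

#include <arm_neon.h>

/* vsli_n_u8(dst, src, 3): each result byte is (src << 3) with the low
   three bits taken from dst. vsri_n_u8 is the mirror image: (src >> 3)
   with the top three bits kept from dst. */
uint8x8_t shift_left_insert3(uint8x8_t dst, uint8x8_t src) {
  return vsli_n_u8(dst, src, 3);
}
uint8x8_t shift_right_insert3(uint8x8_t dst, uint8x8_t src) {
  return vsri_n_u8(dst, src, 3);
}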
Removed: cfe/trunk/test/CodeGen/arm64_vtst.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64_vtst.c?rev=262963&view=auto
==============================================================================
--- cfe/trunk/test/CodeGen/arm64_vtst.c (original)
+++ cfe/trunk/test/CodeGen/arm64_vtst.c (removed)
@@ -1,22 +0,0 @@
-// RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
-// Test ARM64 SIMD comparison test intrinsics
-
-#include <arm_neon.h>
-
-uint64x2_t test_vtstq_s64(int64x2_t a1, int64x2_t a2) {
- // CHECK-LABEL: test_vtstq_s64
- return vtstq_s64(a1, a2);
- // CHECK: [[COMMONBITS:%[A-Za-z0-9.]+]] = and <2 x i64> {{%a1, %a2|%a2, %a1}}
- // CHECK: [[MASK:%[A-Za-z0-9.]+]] = icmp ne <2 x i64> [[COMMONBITS]], zeroinitializer
- // CHECK: [[RES:%[A-Za-z0-9.]+]] = sext <2 x i1> [[MASK]] to <2 x i64>
- // CHECK: ret <2 x i64> [[RES]]
-}
-
-uint64x2_t test_vtstq_u64(uint64x2_t a1, uint64x2_t a2) {
- // CHECK-LABEL: test_vtstq_u64
- return vtstq_u64(a1, a2);
- // CHECK: [[COMMONBITS:%[A-Za-z0-9.]+]] = and <2 x i64> {{%a1, %a2|%a2, %a1}}
- // CHECK: [[MASK:%[A-Za-z0-9.]+]] = icmp ne <2 x i64> [[COMMONBITS]], zeroinitializer
- // CHECK: [[RES:%[A-Za-z0-9.]+]] = sext <2 x i1> [[MASK]] to <2 x i64>
- // CHECK: ret <2 x i64> [[RES]]
-}
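Finally, the vtst pattern in plain terms (hedged sketch, function name is mine):

#include <arm_neon.h>

/* vtstq_s64 answers "do these lanes share any set bits?": AND the
   inputs, compare against zero, and sign-extend the i1 mask to
   all-ones lanes, which is exactly the IR shape checked above. */
uint64x2_t lanes_share_bits(int64x2_t a, int64x2_t b) {
  return vtstq_s64(a, b);
}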