[clang] d929146 - [Clang][AArch64] Lower NEON vaddv/vminv/vmaxv builtins to llvm.vector.reduce intrinsics. (#165400)
Author: Paul Walker
Date: 2025-10-30T10:46:37Z
New Revision: d929146b3fcd7bafe364a053355bfe35b5e1fdbf
URL: https://github.com/llvm/llvm-project/commit/d929146b3fcd7bafe364a053355bfe35b5e1fdbf
DIFF: https://github.com/llvm/llvm-project/commit/d929146b3fcd7bafe364a053355bfe35b5e1fdbf.diff
LOG: [Clang][AArch64] Lower NEON vaddv/vminv/vmaxv builtins to llvm.vector.reduce intrinsics. (#165400)
This is the first step in removing some NEON reduction intrinsics that
duplicate the behaviour of their llvm.vector.reduce counterparts.
NOTE: The i8/i16 variants differ in that the NEON versions return an i32
result. However, this appears to be more about making their code
generation convenient, with SelectionDAG discarding the extra bits. It is
only relevant for the next phase because the Clang usage always truncates
the result, making llvm.vector.reduce a drop-in replacement.
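For illustration, a minimal C sketch of the usage pattern this affects (a
hypothetical example, not from the patch; the function name sum_lanes is
made up and only arm_neon.h is assumed):

  #include <arm_neon.h>

  // The ACLE intrinsic returns int8_t, so Clang truncated the old
  // i32-returning llvm.aarch64.neon.saddv result anyway; an i8-returning
  // call to llvm.vector.reduce.add.v8i8 is therefore a drop-in
  // replacement at every Clang call site.
  int8_t sum_lanes(int8x8_t v) {
    return vaddv_s8(v);
  }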
Added:
Modified:
clang/lib/CodeGen/TargetBuiltins/ARM.cpp
clang/test/CodeGen/AArch64/neon-across.c
clang/test/CodeGen/AArch64/neon-intrinsics.c
Removed:
################################################################################
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 60f9b86333670..15fa78ddba715 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -1193,14 +1193,22 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddv_s16, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_s32, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_s8, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_u16, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_u32, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddv_u8, vector_reduce_add, Add1ArgType),
NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vaddvq_s16, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_s32, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_s64, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_s8, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_u16, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_u32, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_u64, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vaddvq_u8, vector_reduce_add, Add1ArgType),
NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
@@ -1243,27 +1251,43 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxv_s16, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxv_s32, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxv_s8, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxv_u16, vector_reduce_umax, Add1ArgType),
+ NEONMAP1(vmaxv_u32, vector_reduce_umax, Add1ArgType),
+ NEONMAP1(vmaxv_u8, vector_reduce_umax, Add1ArgType),
NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
- NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
+ NEONMAP1(vmaxvq_s16, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxvq_s32, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxvq_s8, vector_reduce_smax, Add1ArgType),
+ NEONMAP1(vmaxvq_u16, vector_reduce_umax, Add1ArgType),
+ NEONMAP1(vmaxvq_u32, vector_reduce_umax, Add1ArgType),
+ NEONMAP1(vmaxvq_u8, vector_reduce_umax, Add1ArgType),
NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
- NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminv_s16, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminv_s32, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminv_s8, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminv_u16, vector_reduce_umin, Add1ArgType),
+ NEONMAP1(vminv_u32, vector_reduce_umin, Add1ArgType),
+ NEONMAP1(vminv_u8, vector_reduce_umin, Add1ArgType),
NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
- NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
+ NEONMAP1(vminvq_s16, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminvq_s32, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminvq_s8, vector_reduce_smin, Add1ArgType),
+ NEONMAP1(vminvq_u16, vector_reduce_umin, Add1ArgType),
+ NEONMAP1(vminvq_u32, vector_reduce_umin, Add1ArgType),
+ NEONMAP1(vminvq_u8, vector_reduce_umin, Add1ArgType),
NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
- NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
- NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
+ NEONMAP1(vpaddd_s64, vector_reduce_add, Add1ArgType),
+ NEONMAP1(vpaddd_u64, vector_reduce_add, Add1ArgType),
NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
@@ -7067,127 +7091,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Int = Intrinsic::bitreverse;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
}
- case NEON::BI__builtin_neon_vaddv_u8:
- // FIXME: These are handled by the AArch64 scalar code.
- usgn = true;
- [[fallthrough]];
- case NEON::BI__builtin_neon_vaddv_s8: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vaddv_u16:
- usgn = true;
- [[fallthrough]];
- case NEON::BI__builtin_neon_vaddv_s16: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vaddvq_u8:
- usgn = true;
- [[fallthrough]];
- case NEON::BI__builtin_neon_vaddvq_s8: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vaddvq_u16:
- usgn = true;
- [[fallthrough]];
- case NEON::BI__builtin_neon_vaddvq_s16: {
- Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_u8: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_u16: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_u8: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_u16: {
- Int = Intrinsic::aarch64_neon_umaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_s8: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxv_s16: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_s8: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vmaxvq_s16: {
- Int = Intrinsic::aarch64_neon_smaxv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
case NEON::BI__builtin_neon_vmaxv_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
@@ -7206,78 +7109,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
- case NEON::BI__builtin_neon_vminv_u8: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminv_u16: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminvq_u8: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminvq_u16: {
- Int = Intrinsic::aarch64_neon_uminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminv_s8: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminv_s16: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 4);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
- case NEON::BI__builtin_neon_vminvq_s8: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int8Ty);
- }
- case NEON::BI__builtin_neon_vminvq_s16: {
- Int = Intrinsic::aarch64_neon_sminv;
- Ty = Int32Ty;
- VTy = llvm::FixedVectorType::get(Int16Ty, 8);
- llvm::Type *Tys[2] = { Ty, VTy };
- Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
- return Builder.CreateTrunc(Ops[0], Int16Ty);
- }
case NEON::BI__builtin_neon_vminv_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
diff --git a/clang/test/CodeGen/AArch64/neon-across.c b/clang/test/CodeGen/AArch64/neon-across.c
index aa0387d89dfef..aae5097da7789 100644
--- a/clang/test/CodeGen/AArch64/neon-across.c
+++ b/clang/test/CodeGen/AArch64/neon-across.c
@@ -113,9 +113,8 @@ uint64_t test_vaddlvq_u32(uint32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxv_s8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMAXV_S8_I:%.*]] = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMAXV_S8_I]]
//
int8_t test_vmaxv_s8(int8x8_t a) {
return vmaxv_s8(a);
@@ -124,9 +123,8 @@ int8_t test_vmaxv_s8(int8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxv_s16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMAXV_S16_I:%.*]] = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMAXV_S16_I]]
//
int16_t test_vmaxv_s16(int16x4_t a) {
return vmaxv_s16(a);
@@ -135,9 +133,8 @@ int16_t test_vmaxv_s16(int16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxv_u8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMAXV_U8_I:%.*]] = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMAXV_U8_I]]
//
uint8_t test_vmaxv_u8(uint8x8_t a) {
return vmaxv_u8(a);
@@ -146,9 +143,8 @@ uint8_t test_vmaxv_u8(uint8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxv_u16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMAXV_U16_I:%.*]] = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMAXV_U16_I]]
//
uint16_t test_vmaxv_u16(uint16x4_t a) {
return vmaxv_u16(a);
@@ -157,9 +153,8 @@ uint16_t test_vmaxv_u16(uint16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMAXVQ_S8_I:%.*]] = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMAXVQ_S8_I]]
//
int8_t test_vmaxvq_s8(int8x16_t a) {
return vmaxvq_s8(a);
@@ -168,9 +163,8 @@ int8_t test_vmaxvq_s8(int8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMAXVQ_S16_I:%.*]] = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMAXVQ_S16_I]]
//
int16_t test_vmaxvq_s16(int16x8_t a) {
return vmaxvq_s16(a);
@@ -179,7 +173,7 @@ int16_t test_vmaxvq_s16(int16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_s32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXVQ_S32_I]]
//
int32_t test_vmaxvq_s32(int32x4_t a) {
@@ -189,9 +183,8 @@ int32_t test_vmaxvq_s32(int32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMAXVQ_U8_I:%.*]] = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMAXVQ_U8_I]]
//
uint8_t test_vmaxvq_u8(uint8x16_t a) {
return vmaxvq_u8(a);
@@ -200,9 +193,8 @@ uint8_t test_vmaxvq_u8(uint8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMAXVQ_U16_I:%.*]] = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMAXVQ_U16_I]]
//
uint16_t test_vmaxvq_u16(uint16x8_t a) {
return vmaxvq_u16(a);
@@ -211,7 +203,7 @@ uint16_t test_vmaxvq_u16(uint16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vmaxvq_u32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXVQ_U32_I]]
//
uint32_t test_vmaxvq_u32(uint32x4_t a) {
@@ -221,9 +213,8 @@ uint32_t test_vmaxvq_u32(uint32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminv_s8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMINV_S8_I:%.*]] = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMINV_S8_I]]
//
int8_t test_vminv_s8(int8x8_t a) {
return vminv_s8(a);
@@ -232,9 +223,8 @@ int8_t test_vminv_s8(int8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminv_s16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMINV_S16_I:%.*]] = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMINV_S16_I]]
//
int16_t test_vminv_s16(int16x4_t a) {
return vminv_s16(a);
@@ -243,9 +233,8 @@ int16_t test_vminv_s16(int16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminv_u8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMINV_U8_I:%.*]] = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMINV_U8_I]]
//
uint8_t test_vminv_u8(uint8x8_t a) {
return vminv_u8(a);
@@ -254,9 +243,8 @@ uint8_t test_vminv_u8(uint8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminv_u16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMINV_U16_I:%.*]] = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMINV_U16_I]]
//
uint16_t test_vminv_u16(uint16x4_t a) {
return vminv_u16(a);
@@ -265,9 +253,8 @@ uint16_t test_vminv_u16(uint16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_s8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMINVQ_S8_I:%.*]] = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMINVQ_S8_I]]
//
int8_t test_vminvq_s8(int8x16_t a) {
return vminvq_s8(a);
@@ -276,9 +263,8 @@ int8_t test_vminvq_s8(int8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_s16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMINVQ_S16_I:%.*]] = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMINVQ_S16_I]]
//
int16_t test_vminvq_s16(int16x8_t a) {
return vminvq_s16(a);
@@ -287,7 +273,7 @@ int16_t test_vminvq_s16(int16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_s32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VMINVQ_S32_I:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINVQ_S32_I]]
//
int32_t test_vminvq_s32(int32x4_t a) {
@@ -297,9 +283,8 @@ int32_t test_vminvq_s32(int32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_u8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VMINVQ_U8_I:%.*]] = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VMINVQ_U8_I]]
//
uint8_t test_vminvq_u8(uint8x16_t a) {
return vminvq_u8(a);
@@ -308,9 +293,8 @@ uint8_t test_vminvq_u8(uint8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_u16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VMINVQ_U16_I:%.*]] = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VMINVQ_U16_I]]
//
uint16_t test_vminvq_u16(uint16x8_t a) {
return vminvq_u16(a);
@@ -319,7 +303,7 @@ uint16_t test_vminvq_u16(uint16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vminvq_u32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VMINVQ_U32_I:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINVQ_U32_I]]
//
uint32_t test_vminvq_u32(uint32x4_t a) {
@@ -329,9 +313,8 @@ uint32_t test_vminvq_u32(uint32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddv_s8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VADDV_S8_I:%.*]] = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VADDV_S8_I]]
//
int8_t test_vaddv_s8(int8x8_t a) {
return vaddv_s8(a);
@@ -340,9 +323,8 @@ int8_t test_vaddv_s8(int8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddv_s16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VADDV_S16_I:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VADDV_S16_I]]
//
int16_t test_vaddv_s16(int16x4_t a) {
return vaddv_s16(a);
@@ -351,9 +333,8 @@ int16_t test_vaddv_s16(int16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddv_u8
// CHECK-SAME: (<8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VADDV_U8_I:%.*]] = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VADDV_U8_I]]
//
uint8_t test_vaddv_u8(uint8x8_t a) {
return vaddv_u8(a);
@@ -362,9 +343,8 @@ uint8_t test_vaddv_u8(uint8x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddv_u16
// CHECK-SAME: (<4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VADDV_U16_I:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VADDV_U16_I]]
//
uint16_t test_vaddv_u16(uint16x4_t a) {
return vaddv_u16(a);
@@ -373,9 +353,8 @@ uint16_t test_vaddv_u16(uint16x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VADDVQ_S8_I:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VADDVQ_S8_I]]
//
int8_t test_vaddvq_s8(int8x16_t a) {
return vaddvq_s8(a);
@@ -384,9 +363,8 @@ int8_t test_vaddvq_s8(int8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VADDVQ_S16_I:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VADDVQ_S16_I]]
//
int16_t test_vaddvq_s16(int16x8_t a) {
return vaddvq_s16(a);
@@ -395,7 +373,7 @@ int16_t test_vaddvq_s16(int16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_s32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VADDVQ_S32_I:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDVQ_S32_I]]
//
int32_t test_vaddvq_s32(int32x4_t a) {
@@ -405,9 +383,8 @@ int32_t test_vaddvq_s32(int32x4_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u8
// CHECK-SAME: (<16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
-// CHECK-NEXT: ret i8 [[TMP0]]
+// CHECK-NEXT: [[VADDVQ_U8_I:%.*]] = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> [[A]])
+// CHECK-NEXT: ret i8 [[VADDVQ_U8_I]]
//
uint8_t test_vaddvq_u8(uint8x16_t a) {
return vaddvq_u8(a);
@@ -416,9 +393,8 @@ uint8_t test_vaddvq_u8(uint8x16_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u16
// CHECK-SAME: (<8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> [[A]])
-// CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i16
-// CHECK-NEXT: ret i16 [[TMP0]]
+// CHECK-NEXT: [[VADDVQ_U16_I:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[A]])
+// CHECK-NEXT: ret i16 [[VADDVQ_U16_I]]
//
uint16_t test_vaddvq_u16(uint16x8_t a) {
return vaddvq_u16(a);
@@ -427,7 +403,7 @@ uint16_t test_vaddvq_u16(uint16x8_t a) {
// CHECK-LABEL: define {{[^@]+}}@test_vaddvq_u32
// CHECK-SAME: (<4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> [[A]])
+// CHECK-NEXT: [[VADDVQ_U32_I:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDVQ_U32_I]]
//
uint32_t test_vaddvq_u32(uint32x4_t a) {
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index 035e1ca1b45e8..1c628bbba483f 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -12643,7 +12643,7 @@ uint64_t test_vqrshld_u64(uint64_t a, int64_t b) {
// CHECK-LABEL: define dso_local i64 @test_vpaddd_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VPADDD_S64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> [[A]])
+// CHECK-NEXT: [[VPADDD_S64_I:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[A]])
// CHECK-NEXT: ret i64 [[VPADDD_S64_I]]
//
int64_t test_vpaddd_s64(int64x2_t a) {
@@ -23227,7 +23227,7 @@ uint64x2_t test_vpaddq_u64(uint64x2_t a, uint64x2_t b) {
// CHECK-LABEL: define dso_local i64 @test_vpaddd_u64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VPADDD_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> [[A]])
+// CHECK-NEXT: [[VPADDD_U64_I:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[A]])
// CHECK-NEXT: ret i64 [[VPADDD_U64_I]]
//
uint64_t test_vpaddd_u64(uint64x2_t a) {
@@ -23237,7 +23237,7 @@ uint64_t test_vpaddd_u64(uint64x2_t a) {
// CHECK-LABEL: define dso_local i64 @test_vaddvq_s64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VADDVQ_S64_I:%.*]] = call i64 @llvm.aarch64.neon.saddv.i64.v2i64(<2 x i64> [[A]])
+// CHECK-NEXT: [[VADDVQ_S64_I:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[A]])
// CHECK-NEXT: ret i64 [[VADDVQ_S64_I]]
//
int64_t test_vaddvq_s64(int64x2_t a) {
@@ -23247,7 +23247,7 @@ int64_t test_vaddvq_s64(int64x2_t a) {
// CHECK-LABEL: define dso_local i64 @test_vaddvq_u64(
// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VADDVQ_U64_I:%.*]] = call i64 @llvm.aarch64.neon.uaddv.i64.v2i64(<2 x i64> [[A]])
+// CHECK-NEXT: [[VADDVQ_U64_I:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[A]])
// CHECK-NEXT: ret i64 [[VADDVQ_U64_I]]
//
uint64_t test_vaddvq_u64(uint64x2_t a) {
@@ -23878,7 +23878,7 @@ float64x1_t test_vrsqrts_f64(float64x1_t a, float64x1_t b) {
// CHECK-LABEL: define dso_local i32 @test_vminv_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMINV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VMINV_S32_I:%.*]] = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINV_S32_I]]
//
int32_t test_vminv_s32(int32x2_t a) {
@@ -23888,7 +23888,7 @@ int32_t test_vminv_s32(int32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vminv_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMINV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VMINV_U32_I:%.*]] = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMINV_U32_I]]
//
uint32_t test_vminv_u32(uint32x2_t a) {
@@ -23898,7 +23898,7 @@ uint32_t test_vminv_u32(uint32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vmaxv_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMAXV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VMAXV_S32_I:%.*]] = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXV_S32_I]]
//
int32_t test_vmaxv_s32(int32x2_t a) {
@@ -23908,7 +23908,7 @@ int32_t test_vmaxv_s32(int32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vmaxv_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMAXV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VMAXV_U32_I:%.*]] = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VMAXV_U32_I]]
//
uint32_t test_vmaxv_u32(uint32x2_t a) {
@@ -23918,7 +23918,7 @@ uint32_t test_vmaxv_u32(uint32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vaddv_s32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VADDV_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VADDV_S32_I:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDV_S32_I]]
//
int32_t test_vaddv_s32(int32x2_t a) {
@@ -23928,7 +23928,7 @@ int32_t test_vaddv_s32(int32x2_t a) {
// CHECK-LABEL: define dso_local i32 @test_vaddv_u32(
// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VADDV_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v2i32(<2 x i32> [[A]])
+// CHECK-NEXT: [[VADDV_U32_I:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[A]])
// CHECK-NEXT: ret i32 [[VADDV_U32_I]]
//
uint32_t test_vaddv_u32(uint32x2_t a) {