[clang] [CIR][AArch64] Upstream NEON Minimum (PR #187935)
Zhihui Yang via cfe-commits
cfe-commits at lists.llvm.org
Tue Mar 24 08:41:45 PDT 2026
https://github.com/YGGkk updated https://github.com/llvm/llvm-project/pull/187935
>From f14970ff206e9772d2c1fa1ba7b5ae04d5ccfd06 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Sun, 22 Mar 2026 08:18:45 -0700
Subject: [PATCH 01/15] [CIR][AArch64] Upstream NEON Minimum
---
.../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 14 ++
clang/test/CodeGen/AArch64/neon/intrinsics.c | 167 ++++++++++++++++++
2 files changed, 181 insertions(+)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 5d7b8d839fa84..aafcac244fe65 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2873,8 +2873,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
case NEON::BI__builtin_neon_vmax_v:
case NEON::BI__builtin_neon_vmaxq_v:
case NEON::BI__builtin_neon_vmaxh_f16:
+ cgm.errorNYI(expr->getSourceRange(),
+ std::string("unimplemented AArch64 builtin call: ") +
+ getContext().BuiltinInfo.getName(builtinID));
+ return mlir::Value{};
case NEON::BI__builtin_neon_vmin_v:
case NEON::BI__builtin_neon_vminq_v:
+ intrName = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin";
+ if(cir::isFPOrVectorOfFPType(ty))
+ intrName = "aarch64.neon.fmin";
+ return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
case NEON::BI__builtin_neon_vminh_f16:
cgm.errorNYI(expr->getSourceRange(),
std::string("unimplemented AArch64 builtin call: ") +
@@ -2892,8 +2900,14 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
case NEON::BI__builtin_neon_vpminq_v:
case NEON::BI__builtin_neon_vpmax_v:
case NEON::BI__builtin_neon_vpmaxq_v:
+ cgm.errorNYI(expr->getSourceRange(),
+ std::string("unimplemented AArch64 builtin call: ") +
+ getContext().BuiltinInfo.getName(builtinID));
+ return mlir::Value{};
case NEON::BI__builtin_neon_vminnm_v:
case NEON::BI__builtin_neon_vminnmq_v:
+ intrName = "aarch64.neon.fminnm";
+ return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
case NEON::BI__builtin_neon_vminnmh_f16:
case NEON::BI__builtin_neon_vmaxnm_v:
case NEON::BI__builtin_neon_vmaxnmq_v:
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index bf8e62feda8da..fcc0b14c88695 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -982,3 +982,170 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
return (int64_t)vshld_u64(a, b);
}
+//===----------------------------------------------------------------------===//
+// 2.1.8 Minimum
+// https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#minimum
+//===----------------------------------------------------------------------===//
+
+// ALL-LABEL: @test_vmin_s8
+int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
+ // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+ // LLVM: ret <8 x i8> [[VMIN_I]]
+ return vmin_s8(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_s16
+int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
+ // LLVM-NEXT: ret <4 x i16> [[VMIN_I]]
+ return vmin_s16(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_s32
+int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
+ // LLVM-NEXT: ret <2 x i32> [[VMIN_I]]
+ return vmin_s32(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_u8
+uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
+ // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+ // LLVM-NEXT: ret <8 x i8> [[VMIN_I]]
+ return vmin_u8(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_u16
+uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
+ // LLVM-NEXT: ret <4 x i16> [[VMIN_I]]
+ return vmin_u16(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_u32
+uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
+ // LLVM-NEXT: ret <2 x i32> [[VMIN_I]]
+ return vmin_u32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_s8
+int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
+ // LLVM: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+ // LLVM-NEXT: ret <16 x i8> [[VMIN_I]]
+ return vminq_s8(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_s16
+int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
+ // LLVM: [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
+ // LLVM-NEXT: ret <8 x i16> [[VMIN_I]]
+ return vminq_s16(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_s32
+int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
+ // LLVM-NEXT: ret <4 x i32> [[VMIN_I]]
+ return vminq_s32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_u8
+uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
+ // LLVM: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+ // LLVM-NEXT: ret <16 x i8> [[VMIN_I]]
+ return vminq_u8(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_u16
+uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
+ // LLVM: [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
+ // LLVM-NEXT: ret <8 x i16> [[VMIN_I]]
+ return vminq_u16(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_u32
+uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
+ // LLVM-NEXT: ret <4 x i32> [[VMIN_I]]
+ return vminq_u32(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_f32
+float32x4_t test_vmin_f32(float32x4_t v1, float32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+ // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+ return vmin_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_f64
+float64x2_t test_vmin_f64(float64x2_t v1, float64x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+ // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+ return vmin_f64(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_f32
+float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+ // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+ return vminq_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_f64
+float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+ // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+ return vminq_f64(v1, v2);
+}
+ // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+ // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+ return vminnm_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnm_f64
+float64x2_t test_vminnm_f64(float64x2_t v1, float64x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+ // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+ return vminnm_f64(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnmq_f32
+float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+ // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+ return vminnmq_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnmq_f64
+float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+ // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+ return vminnmq_f64(v1, v2);
+}
+// ALL-LABEL: @test_vminnm_f32
+float32x4_t test_vminnm_f32(float32x4_t v1, float32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+ // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+ return vminnm_f32(v1, v2);
+}
\ No newline at end of file
>From f6b222ccd532444abc6e46887189295716db0111 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Sun, 22 Mar 2026 08:20:11 -0700
Subject: [PATCH 02/15] [NEON] Fix indentation and formatting in AArch64
builtin expression handling
---
clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index aafcac244fe65..140a6e893a394 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2873,16 +2873,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
case NEON::BI__builtin_neon_vmax_v:
case NEON::BI__builtin_neon_vmaxq_v:
case NEON::BI__builtin_neon_vmaxh_f16:
- cgm.errorNYI(expr->getSourceRange(),
+ cgm.errorNYI(expr->getSourceRange(),
std::string("unimplemented AArch64 builtin call: ") +
getContext().BuiltinInfo.getName(builtinID));
return mlir::Value{};
case NEON::BI__builtin_neon_vmin_v:
case NEON::BI__builtin_neon_vminq_v:
- intrName = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin";
- if(cir::isFPOrVectorOfFPType(ty))
- intrName = "aarch64.neon.fmin";
- return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
+ intrName = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin";
+ if (cir::isFPOrVectorOfFPType(ty))
+ intrName = "aarch64.neon.fmin";
+ return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
case NEON::BI__builtin_neon_vminh_f16:
cgm.errorNYI(expr->getSourceRange(),
std::string("unimplemented AArch64 builtin call: ") +
@@ -2900,13 +2900,13 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
case NEON::BI__builtin_neon_vpminq_v:
case NEON::BI__builtin_neon_vpmax_v:
case NEON::BI__builtin_neon_vpmaxq_v:
- cgm.errorNYI(expr->getSourceRange(),
+ cgm.errorNYI(expr->getSourceRange(),
std::string("unimplemented AArch64 builtin call: ") +
getContext().BuiltinInfo.getName(builtinID));
return mlir::Value{};
case NEON::BI__builtin_neon_vminnm_v:
case NEON::BI__builtin_neon_vminnmq_v:
- intrName = "aarch64.neon.fminnm";
+ intrName = "aarch64.neon.fminnm";
return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
case NEON::BI__builtin_neon_vminnmh_f16:
case NEON::BI__builtin_neon_vmaxnm_v:
>From a3128e160b005a524816f4da31205ab31ceb797c Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Mon, 23 Mar 2026 07:31:25 -0700
Subject: [PATCH 03/15] [AArch64][NEON] Update vector types in vmin and vminnm
intrinsics tests for improved accuracy
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 47 +++++++++-----------
1 file changed, 22 insertions(+), 25 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index fcc0b14c88695..c7cf41d950a29 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1084,18 +1084,18 @@ uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
}
// ALL-LABEL: @test_vmin_f32
-float32x4_t test_vmin_f32(float32x4_t v1, float32x4_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
- // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
- // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
+ // LLVM-NEXT: ret <2 x float> [[VMIN_I]]
return vmin_f32(v1, v2);
}
// ALL-LABEL: @test_vmin_f64
-float64x2_t test_vmin_f64(float64x2_t v1, float64x2_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
- // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
- // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
+ // LLVM-NEXT: ret <1 x double> [[VMIN_I]]
return vmin_f64(v1, v2);
}
@@ -1114,16 +1114,20 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// LLVM-NEXT: ret <2 x double> [[VMIN_I]]
return vminq_f64(v1, v2);
}
- // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
- // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+
+// ALL-LABEL: @test_vminnm_f32
+float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
+ // LLVM-NEXT: ret <2 x float> [[VMIN_I]]
return vminnm_f32(v1, v2);
}
// ALL-LABEL: @test_vminnm_f64
-float64x2_t test_vminnm_f64(float64x2_t v1, float64x2_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
- // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
- // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
+ // LLVM-NEXT: ret <1 x double> [[VMIN_I]]
return vminnm_f64(v1, v2);
}
@@ -1136,16 +1140,9 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
}
// ALL-LABEL: @test_vminnmq_f64
-float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
- // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
- // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+float64x1_t test_vminnmq_f64(float64x1_t v1, float64x1_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
+ // LLVM-NEXT: ret <1 x double> [[VMIN_I]]
return vminnmq_f64(v1, v2);
-}
-// ALL-LABEL: @test_vminnm_f32
-float32x4_t test_vminnm_f32(float32x4_t v1, float32x4_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
- // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
- // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
- return vminnm_f32(v1, v2);
}
\ No newline at end of file
>From 981e4303a3953ecac6b57e48ce62ba5d851e1106 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Mon, 23 Mar 2026 07:55:08 -0700
Subject: [PATCH 04/15] [NEON] Update test_vminnmq_f64 to use float64x2_t for
improved accuracy
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index c7cf41d950a29..e6cc2bc374966 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1140,9 +1140,9 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
}
// ALL-LABEL: @test_vminnmq_f64
-float64x1_t test_vminnmq_f64(float64x1_t v1, float64x1_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
- // LLVM: [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
- // LLVM-NEXT: ret <1 x double> [[VMIN_I]]
+float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+ // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
return vminnmq_f64(v1, v2);
}
\ No newline at end of file
>From 1f4238642225a3cc08848e05b5929e4e8049a95d Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Mon, 23 Mar 2026 08:34:59 -0700
Subject: [PATCH 05/15] [NEON] Update vmin intrinsics tests to use intermediate
results for improved accuracy
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 80 ++++++++++----------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index e6cc2bc374966..43ba8c8774346 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -990,159 +990,159 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
// ALL-LABEL: @test_vmin_s8
int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
- // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
- // LLVM: ret <8 x i8> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[VMIN_I]], <8 x i8> [[VMIN1_I]])
+ // LLVM: ret <8 x i8> [[VMIN2_I]]
return vmin_s8(v1, v2);
}
// ALL-LABEL: @test_vmin_s16
int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
- // LLVM: [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
- // LLVM-NEXT: ret <4 x i16> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
+ // LLVM-NEXT: ret <4 x i16> [[VMIN2_I]]
return vmin_s16(v1, v2);
}
// ALL-LABEL: @test_vmin_s32
int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
- // LLVM: [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
- // LLVM-NEXT: ret <2 x i32> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
+ // LLVM-NEXT: ret <2 x i32> [[VMIN2_I]]
return vmin_s32(v1, v2);
}
// ALL-LABEL: @test_vmin_u8
uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
- // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
- // LLVM-NEXT: ret <8 x i8> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[VMIN_I]], <8 x i8> [[VMIN1_I]])
+ // LLVM-NEXT: ret <8 x i8> [[VMIN2_I]]
return vmin_u8(v1, v2);
}
// ALL-LABEL: @test_vmin_u16
uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
- // LLVM: [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
- // LLVM-NEXT: ret <4 x i16> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
+ // LLVM-NEXT: ret <4 x i16> [[VMIN2_I]]
return vmin_u16(v1, v2);
}
// ALL-LABEL: @test_vmin_u32
uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
- // LLVM: [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
- // LLVM-NEXT: ret <2 x i32> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
+ // LLVM-NEXT: ret <2 x i32> [[VMIN2_I]]
return vmin_u32(v1, v2);
}
// ALL-LABEL: @test_vminq_s8
int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
- // LLVM: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
- // LLVM-NEXT: ret <16 x i8> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[VMIN_I]], <16 x i8> [[VMIN1_I]])
+ // LLVM-NEXT: ret <16 x i8> [[VMIN2_I]]
return vminq_s8(v1, v2);
}
// ALL-LABEL: @test_vminq_s16
int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
- // LLVM: [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
- // LLVM-NEXT: ret <8 x i16> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
+ // LLVM-NEXT: ret <8 x i16> [[VMIN2_I]]
return vminq_s16(v1, v2);
}
// ALL-LABEL: @test_vminq_s32
int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
- // LLVM: [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
- // LLVM-NEXT: ret <4 x i32> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
+ // LLVM-NEXT: ret <4 x i32> [[VMIN2_I]]
return vminq_s32(v1, v2);
}
// ALL-LABEL: @test_vminq_u8
uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
- // LLVM: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
- // LLVM-NEXT: ret <16 x i8> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[VMIN_I]], <16 x i8> [[VMIN1_I]])
+ // LLVM-NEXT: ret <16 x i8> [[VMIN2_I]]
return vminq_u8(v1, v2);
}
// ALL-LABEL: @test_vminq_u16
uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
- // LLVM: [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
- // LLVM-NEXT: ret <8 x i16> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
+ // LLVM-NEXT: ret <8 x i16> [[VMIN2_I]]
return vminq_u16(v1, v2);
}
// ALL-LABEL: @test_vminq_u32
uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
- // LLVM: [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
- // LLVM-NEXT: ret <4 x i32> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
+ // LLVM-NEXT: ret <4 x i32> [[VMIN2_I]]
return vminq_u32(v1, v2);
}
// ALL-LABEL: @test_vmin_f32
float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
- // LLVM: [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
- // LLVM-NEXT: ret <2 x float> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
+ // LLVM-NEXT: ret <2 x float> [[VMIN2_I]]
return vmin_f32(v1, v2);
}
// ALL-LABEL: @test_vmin_f64
float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
- // LLVM: [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
- // LLVM-NEXT: ret <1 x double> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_I]], <1 x double> [[VMIN1_I]])
+ // LLVM-NEXT: ret <1 x double> [[VMIN2_I]]
return vmin_f64(v1, v2);
}
// ALL-LABEL: @test_vminq_f32
float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
- // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
- // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
+ // LLVM-NEXT: ret <4 x float> [[VMIN2_I]]
return vminq_f32(v1, v2);
}
// ALL-LABEL: @test_vminq_f64
float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
- // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
- // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
+ // LLVM-NEXT: ret <2 x double> [[VMIN2_I]]
return vminq_f64(v1, v2);
}
// ALL-LABEL: @test_vminnm_f32
float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
- // LLVM: [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
- // LLVM-NEXT: ret <2 x float> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
+ // LLVM-NEXT: ret <2 x float> [[VMIN2_I]]
return vminnm_f32(v1, v2);
}
// ALL-LABEL: @test_vminnm_f64
float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
- // LLVM: [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
- // LLVM-NEXT: ret <1 x double> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMIN_I]], <1 x double> [[VMIN1_I]])
+ // LLVM-NEXT: ret <1 x double> [[VMIN2_I]]
return vminnm_f64(v1, v2);
}
// ALL-LABEL: @test_vminnmq_f32
float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
- // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
- // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
+ // LLVM-NEXT: ret <4 x float> [[VMIN2_I]]
return vminnmq_f32(v1, v2);
}
// ALL-LABEL: @test_vminnmq_f64
float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
- // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
- // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+ // LLVM: [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
+ // LLVM-NEXT: ret <2 x double> [[VMIN2_I]]
return vminnmq_f64(v1, v2);
}
\ No newline at end of file
>From 4ee8c8db4feea2866e8f7beba4c672286201bd0d Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 11:20:24 +0800
Subject: [PATCH 06/15] Update LLVM intrinsic calls for vmin functions
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 187 +++++++++++++++----
1 file changed, 146 insertions(+), 41 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 43ba8c8774346..8d7ada09a5aa0 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -990,159 +990,264 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
// ALL-LABEL: @test_vmin_s8
int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
- // LLVM: [[VMIN2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[VMIN_I]], <8 x i8> [[VMIN1_I]])
- // LLVM: ret <8 x i8> [[VMIN2_I]]
+ // LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+ // LLVM-NEXT: ret <8 x i8> [[VMIN_V_I]]
return vmin_s8(v1, v2);
}
// ALL-LABEL: @test_vmin_s16
int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
- // LLVM: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
- // LLVM-NEXT: ret <4 x i16> [[VMIN2_I]]
+ // LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+ // LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
+ // LLVM-NEXT: [[VMIN_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[VMIN_V_I]], <4 x i16> [[VMIN_V1_I]])
+ // LLVM-NEXT: ret <4 x i16> [[VMIN_V2_I]]
return vmin_s16(v1, v2);
}
// ALL-LABEL: @test_vmin_s32
int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
- // LLVM: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
- // LLVM-NEXT: ret <2 x i32> [[VMIN2_I]]
+ // LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+ // LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
+ // LLVM-NEXT: [[VMIN_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[VMIN_V_I]], <2 x i32> [[VMIN_V1_I]])
+ // LLVM-NEXT: ret <2 x i32> [[VMIN_V2_I]]
return vmin_s32(v1, v2);
}
// ALL-LABEL: @test_vmin_u8
uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
- // LLVM: [[VMIN2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[VMIN_I]], <8 x i8> [[VMIN1_I]])
- // LLVM-NEXT: ret <8 x i8> [[VMIN2_I]]
+ // LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+ // LLVM-NEXT: ret <8 x i8> [[VMIN_V_I]]
return vmin_u8(v1, v2);
}
// ALL-LABEL: @test_vmin_u16
uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
- // LLVM: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
- // LLVM-NEXT: ret <4 x i16> [[VMIN2_I]]
+ // LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+ // LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
+ // LLVM-NEXT: [[VMIN_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[VMIN_V_I]], <4 x i16> [[VMIN_V1_I]])
+ // LLVM-NEXT: ret <4 x i16> [[VMIN_V2_I]]
return vmin_u16(v1, v2);
}
// ALL-LABEL: @test_vmin_u32
uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
- // LLVM: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
- // LLVM-NEXT: ret <2 x i32> [[VMIN2_I]]
+ // LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+ // LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
+ // LLVM-NEXT: [[VMIN_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[VMIN_V_I]], <2 x i32> [[VMIN_V1_I]])
+ // LLVM-NEXT: ret <2 x i32> [[VMIN_V2_I]]
return vmin_u32(v1, v2);
}
// ALL-LABEL: @test_vminq_s8
int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
- // LLVM: [[VMIN2_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[VMIN_I]], <16 x i8> [[VMIN1_I]])
- // LLVM-NEXT: ret <16 x i8> [[VMIN2_I]]
+ // LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+ // LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
return vminq_s8(v1, v2);
}
// ALL-LABEL: @test_vminq_s16
int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
- // LLVM: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
- // LLVM-NEXT: ret <8 x i16> [[VMIN2_I]]
+ // LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+ // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
+ // LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[VMINQ_V_I]], <8 x i16> [[VMINQ_V1_I]])
+ // LLVM-NEXT: ret <8 x i16> [[VMINQ_V2_I]]
return vminq_s16(v1, v2);
}
// ALL-LABEL: @test_vminq_s32
int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
- // LLVM: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
- // LLVM-NEXT: ret <4 x i32> [[VMIN2_I]]
+ // LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+ // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
+ // LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[VMINQ_V_I]], <4 x i32> [[VMINQ_V1_I]])
+ // LLVM-NEXT: ret <4 x i32> [[VMINQ_V2_I]]
return vminq_s32(v1, v2);
}
// ALL-LABEL: @test_vminq_u8
uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
- // LLVM: [[VMIN2_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[VMIN_I]], <16 x i8> [[VMIN1_I]])
- // LLVM-NEXT: ret <16 x i8> [[VMIN2_I]]
+ // LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+ // LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
return vminq_u8(v1, v2);
}
// ALL-LABEL: @test_vminq_u16
uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
- // LLVM: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
- // LLVM-NEXT: ret <8 x i16> [[VMIN2_I]]
+ // LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+ // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
+ // LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMINQ_V_I]], <8 x i16> [[VMINQ_V1_I]])
+ // LLVM-NEXT: ret <8 x i16> [[VMINQ_V2_I]]
+
return vminq_u16(v1, v2);
}
// ALL-LABEL: @test_vminq_u32
uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
- // LLVM: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
- // LLVM-NEXT: ret <4 x i32> [[VMIN2_I]]
+ // LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+ // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
+ // LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[VMINQ_V_I]], <4 x i32> [[VMINQ_V1_I]])
+ // LLVM-NEXT: ret <4 x i32> [[VMINQ_V2_I]]
return vminq_u32(v1, v2);
}
// ALL-LABEL: @test_vmin_f32
float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
- // LLVM: [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
- // LLVM-NEXT: ret <2 x float> [[VMIN2_I]]
+ // LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <8 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <8 x i8>
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
+ // LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
+ // LLVM-NEXT: [[VMIN_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_V_I]], <2 x float> [[VMIN_V1_I]])
+ // LLVM-NEXT: ret <2 x float> [[VMIN_V2_I]]
return vmin_f32(v1, v2);
}
// ALL-LABEL: @test_vmin_f64
float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
- // LLVM: [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_I]], <1 x double> [[VMIN1_I]])
- // LLVM-NEXT: ret <1 x double> [[VMIN2_I]]
+ // LLVM-SAME: <1 x double> noundef [[V1:%.*]], <1 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[V1]] to <8 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <1 x double> [[V2]] to <8 x i8>
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
+ // LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
+ // LLVM-NEXT: [[VMIN_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_V_I]], <1 x double> [[VMIN_V1_I]])
+ // LLVM-NEXT: ret <1 x double> [[VMIN_V2_I]]
return vmin_f64(v1, v2);
}
// ALL-LABEL: @test_vminq_f32
float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
- // LLVM: [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
- // LLVM-NEXT: ret <4 x float> [[VMIN2_I]]
+ // LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <16 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <16 x i8>
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
+ // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
+ // LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMINQ_V_I]], <4 x float> [[VMINQ_V1_I]])
+ // LLVM-NEXT: ret <4 x float> [[VMINQ_V2_I]]
return vminq_f32(v1, v2);
}
// ALL-LABEL: @test_vminq_f64
float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
- // LLVM: [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
- // LLVM-NEXT: ret <2 x double> [[VMIN2_I]]
+ // LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <16 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <16 x i8>
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
+ // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
+ // LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMINQ_V_I]], <2 x double> [[VMINQ_V1_I]])
+ // LLVM-NEXT: ret <2 x double> [[VMINQ_V2_I]]
return vminq_f64(v1, v2);
}
// ALL-LABEL: @test_vminnm_f32
float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
- // LLVM: [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
- // LLVM-NEXT: ret <2 x float> [[VMIN2_I]]
+ // LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <8 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <8 x i8>
+ // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
+ // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
+ // LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMINNM_V_I]], <2 x float> [[VMINNM_V1_I]])
+ // LLVM-NEXT: ret <2 x float> [[VMINNM_V2_I]]
return vminnm_f32(v1, v2);
}
// ALL-LABEL: @test_vminnm_f64
float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
- // LLVM: [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMIN_I]], <1 x double> [[VMIN1_I]])
- // LLVM-NEXT: ret <1 x double> [[VMIN2_I]]
+ // LLVM-SAME: <1 x double> noundef [[V1:%.*]], <1 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[V1]] to <8 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <1 x double> [[V2]] to <8 x i8>
+ // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
+ // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
+ // LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMINNM_V_I]], <1 x double> [[VMINNM_V1_I]])
+ // LLVM-NEXT: ret <1 x double> [[VMINNM_V2_I]]
return vminnm_f64(v1, v2);
}
// ALL-LABEL: @test_vminnmq_f32
float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
- // LLVM: [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
- // LLVM-NEXT: ret <4 x float> [[VMIN2_I]]
+ // LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <16 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <16 x i8>
+ // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
+ // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
+ // LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMINNM_V_I]], <4 x float> [[VMINNM_V1_I]])
+ // LLVM-NEXT: ret <4 x float> [[VMINNM_V2_I]]
return vminnmq_f32(v1, v2);
}
// ALL-LABEL: @test_vminnmq_f64
float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
- // LLVM: [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
- // LLVM-NEXT: ret <2 x double> [[VMIN2_I]]
+ // LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <16 x i8>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <16 x i8>
+ // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
+ // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
+ // LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMINNM_V_I]], <2 x double> [[VMINNM_V1_I]])
+ // LLVM-NEXT: ret <2 x double> [[VMINNM_V2_I]]
return vminnmq_f64(v1, v2);
-}
\ No newline at end of file
+}
>From f5257371c0516cd36ec6248dd57f7fec1e006fb1 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 13:56:01 +0800
Subject: [PATCH 07/15] Update LLVM intrinsics for vmin functions
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 78 ++++++++++----------
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 8d7ada09a5aa0..ae851ac674443 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1145,10 +1145,12 @@ float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <8 x i8>
- // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <8 x i8>
- // LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
- // LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
+ // LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[TMP0]] to <8 x i8>
+ // LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x float> [[TMP1]] to <8 x i8>
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
+ // LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
// LLVM-NEXT: [[VMIN_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_V_I]], <2 x float> [[VMIN_V1_I]])
// LLVM-NEXT: ret <2 x float> [[VMIN_V2_I]]
return vmin_f32(v1, v2);
@@ -1157,14 +1159,8 @@ float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// ALL-LABEL: @test_vmin_f64
float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
- // LLVM-SAME: <1 x double> noundef [[V1:%.*]], <1 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[V1]] to <8 x i8>
- // LLVM-NEXT: [[TMP1:%.*]] = bitcast <1 x double> [[V2]] to <8 x i8>
- // LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
- // LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
- // LLVM-NEXT: [[VMIN_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_V_I]], <1 x double> [[VMIN_V1_I]])
- // LLVM-NEXT: ret <1 x double> [[VMIN_V2_I]]
+ // LLVM: [[VMIN_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
+ // LLVM: ret <1 x double> [[VMIN_V2_I]]
return vmin_f64(v1, v2);
}
@@ -1173,10 +1169,12 @@ float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <16 x i8>
- // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <16 x i8>
- // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
- // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
+ // LLVM-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
+ // LLVM-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
+ // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float>
// LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMINQ_V_I]], <4 x float> [[VMINQ_V1_I]])
// LLVM-NEXT: ret <4 x float> [[VMINQ_V2_I]]
return vminq_f32(v1, v2);
@@ -1187,10 +1185,12 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <16 x i8>
- // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <16 x i8>
- // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
- // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
+ // LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
+ // LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
+ // LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
+ // LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double>
// LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMINQ_V_I]], <2 x double> [[VMINQ_V1_I]])
// LLVM-NEXT: ret <2 x double> [[VMINQ_V2_I]]
return vminq_f64(v1, v2);
@@ -1201,10 +1201,12 @@ float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <8 x i8>
- // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <8 x i8>
- // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
- // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
+ // LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
+ // LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
+ // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
+ // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
// LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMINNM_V_I]], <2 x float> [[VMINNM_V1_I]])
// LLVM-NEXT: ret <2 x float> [[VMINNM_V2_I]]
return vminnm_f32(v1, v2);
@@ -1213,13 +1215,7 @@ float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// ALL-LABEL: @test_vminnm_f64
float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
- // LLVM-SAME: <1 x double> noundef [[V1:%.*]], <1 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[V1]] to <8 x i8>
- // LLVM-NEXT: [[TMP1:%.*]] = bitcast <1 x double> [[V2]] to <8 x i8>
- // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
- // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
- // LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMINNM_V_I]], <1 x double> [[VMINNM_V1_I]])
+ // LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
// LLVM-NEXT: ret <1 x double> [[VMINNM_V2_I]]
return vminnm_f64(v1, v2);
}
@@ -1229,10 +1225,12 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <16 x i8>
- // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <16 x i8>
- // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
- // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
+ // LLVM-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
+ // LLVM-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
+ // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
+ // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float>
// LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMINNM_V_I]], <4 x float> [[VMINNM_V1_I]])
// LLVM-NEXT: ret <4 x float> [[VMINNM_V2_I]]
return vminnmq_f32(v1, v2);
@@ -1243,10 +1241,12 @@ float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <16 x i8>
- // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <16 x i8>
- // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
- // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
+ // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
+ // LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
+ // LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
+ // LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
+ // LLVM-NEXT: [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
+ // LLVM-NEXT: [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double>
// LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMINNM_V_I]], <2 x double> [[VMINNM_V1_I]])
// LLVM-NEXT: ret <2 x double> [[VMINNM_V2_I]]
return vminnmq_f64(v1, v2);
>From 765d54e3c6f6c75350ecc0ad6f21aeb67348b7f8 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 14:17:15 +0800
Subject: [PATCH 08/15] Fix bitcast types in LLVM IR comments
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index ae851ac674443..69c3dd42c90b2 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1147,8 +1147,8 @@ float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
- // LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[TMP0]] to <8 x i8>
- // LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x float> [[TMP1]] to <8 x i8>
+ // LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
+ // LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
// LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
// LLVM-NEXT: [[VMIN_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_V_I]], <2 x float> [[VMIN_V1_I]])
@@ -1215,8 +1215,8 @@ float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// ALL-LABEL: @test_vminnm_f64
float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
- // LLVM-NEXT: [[VMINNM_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
- // LLVM-NEXT: ret <1 x double> [[VMINNM_V2_I]]
+ // LLVM: [[VMINNM_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
+ // LLVM: ret <1 x double> [[VMINNM_V2_I]]
return vminnm_f64(v1, v2);
}
>From 9dd3e015180913e05763d3340fbb3523190ef467 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 14:34:25 +0800
Subject: [PATCH 09/15] Update LLVM intrinsic call for vmin_s8 function
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 69c3dd42c90b2..5cd9b7695b47d 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -992,7 +992,7 @@ int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
// LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.arm.neon.vmins.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+ // LLVM-NEXT: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
// LLVM-NEXT: ret <8 x i8> [[VMIN_V_I]]
return vmin_s8(v1, v2);
}
>From 1b2f4fe2a0eb990d6d1db07a9c40a51c834c288a Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 14:54:55 +0800
Subject: [PATCH 10/15] Clean up comments in NEON intrinsic tests
Removed commented LLVM-NEXT entries from various test functions for minimum value operations.
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 18 ------------------
1 file changed, 18 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 5cd9b7695b47d..8caae372cf7dd 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1001,7 +1001,6 @@ int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
// LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
@@ -1015,7 +1014,6 @@ int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
// LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
@@ -1029,7 +1027,6 @@ int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
// LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
// LLVM-NEXT: ret <8 x i8> [[VMIN_V_I]]
return vmin_u8(v1, v2);
@@ -1039,7 +1036,6 @@ uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
// LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
@@ -1053,7 +1049,6 @@ uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
// LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
@@ -1067,7 +1062,6 @@ uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
// LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
// LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
return vminq_s8(v1, v2);
@@ -1077,7 +1071,6 @@ int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
// LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
@@ -1091,7 +1084,6 @@ int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
// LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
@@ -1105,7 +1097,6 @@ int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
// LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
// LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
return vminq_u8(v1, v2);
@@ -1115,14 +1106,12 @@ uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
// LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMINQ_V_I]], <8 x i16> [[VMINQ_V1_I]])
// LLVM-NEXT: ret <8 x i16> [[VMINQ_V2_I]]
-
return vminq_u16(v1, v2);
}
@@ -1130,7 +1119,6 @@ uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
// LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
@@ -1144,7 +1132,6 @@ uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
@@ -1168,7 +1155,6 @@ float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
@@ -1184,7 +1170,6 @@ float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
@@ -1200,7 +1185,6 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
@@ -1224,7 +1208,6 @@ float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
@@ -1240,7 +1223,6 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
>From 32a274fe10999ad1233fd767860dc28bfe176269 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 15:15:17 +0800
Subject: [PATCH 11/15] Update vmin and vminq functions for NEON intrinsics
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 8caae372cf7dd..5cd9b7695b47d 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1001,6 +1001,7 @@ int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
// LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
@@ -1014,6 +1015,7 @@ int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
// LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
@@ -1027,6 +1029,7 @@ int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
// LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
// LLVM-NEXT: ret <8 x i8> [[VMIN_V_I]]
return vmin_u8(v1, v2);
@@ -1036,6 +1039,7 @@ uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
// LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
@@ -1049,6 +1053,7 @@ uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
// LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
@@ -1062,6 +1067,7 @@ uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
// LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
// LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
return vminq_s8(v1, v2);
@@ -1071,6 +1077,7 @@ int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
// LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
@@ -1084,6 +1091,7 @@ int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
// LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
@@ -1097,6 +1105,7 @@ int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
// LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
// LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
return vminq_u8(v1, v2);
@@ -1106,12 +1115,14 @@ uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
// LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
// LLVM-NEXT: [[VMINQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMINQ_V_I]], <8 x i16> [[VMINQ_V1_I]])
// LLVM-NEXT: ret <8 x i16> [[VMINQ_V2_I]]
+
return vminq_u16(v1, v2);
}
@@ -1119,6 +1130,7 @@ uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
// LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
@@ -1132,6 +1144,7 @@ uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
@@ -1155,6 +1168,7 @@ float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
@@ -1170,6 +1184,7 @@ float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
@@ -1185,6 +1200,7 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
@@ -1208,6 +1224,7 @@ float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
@@ -1223,6 +1240,7 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+ // LLVM-NEXT: [[ENTRY:.*:]]
// LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
>From 25739df9bbe37099af8685350d3737923b292f50 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 15:32:01 +0800
Subject: [PATCH 12/15] Update LLVM intrinsic calls in intrinsics.c
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 54 +++++++-------------
1 file changed, 18 insertions(+), 36 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 5cd9b7695b47d..7ccbb4f44d5f4 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -991,8 +991,7 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
// LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+ // LLVM: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
// LLVM-NEXT: ret <8 x i8> [[VMIN_V_I]]
return vmin_s8(v1, v2);
}
@@ -1001,8 +1000,7 @@ int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
// LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
+ // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
@@ -1015,8 +1013,7 @@ int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
// LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
+ // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
@@ -1029,8 +1026,7 @@ int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
// LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+ // LLVM: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
// LLVM-NEXT: ret <8 x i8> [[VMIN_V_I]]
return vmin_u8(v1, v2);
}
@@ -1039,8 +1035,7 @@ uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
// LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
+ // LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
// LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
@@ -1053,8 +1048,7 @@ uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
// LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
+ // LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
// LLVM-NEXT: [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
// LLVM-NEXT: [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
@@ -1067,8 +1061,7 @@ uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
// LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+ // LLVM: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
// LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
return vminq_s8(v1, v2);
}
@@ -1077,8 +1070,7 @@ int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
// LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
+ // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
@@ -1091,8 +1083,7 @@ int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
// LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
+ // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
@@ -1105,8 +1096,7 @@ int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
// LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+ // LLVM: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
// LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
return vminq_u8(v1, v2);
}
@@ -1115,8 +1105,7 @@ uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
// LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
+ // LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
// LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
@@ -1130,8 +1119,7 @@ uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
// LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
+ // LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
// LLVM-NEXT: [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
// LLVM-NEXT: [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
@@ -1144,8 +1132,7 @@ uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
+ // LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
@@ -1168,8 +1155,7 @@ float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
+ // LLVM: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// LLVM-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
@@ -1184,8 +1170,7 @@ float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
+ // LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
@@ -1200,8 +1185,7 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
+ // LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
// LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
@@ -1224,8 +1208,7 @@ float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
+ // LLVM: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// LLVM-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
@@ -1240,8 +1223,7 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
- // LLVM-NEXT: [[ENTRY:.*:]]
- // LLVM-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
+ // LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
// LLVM-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// LLVM-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
>From d8c3cbd076f083406844754f74080855d939159d Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 17:47:35 +0800
Subject: [PATCH 13/15] format code
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 7ccbb4f44d5f4..4349674a3b9e9 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -990,6 +990,7 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
// ALL-LABEL: @test_vmin_s8
int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
+
// LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
// LLVM-NEXT: ret <8 x i8> [[VMIN_V_I]]
@@ -999,6 +1000,7 @@ int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
// ALL-LABEL: @test_vmin_s16
int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
+
// LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
@@ -1012,6 +1014,7 @@ int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
// ALL-LABEL: @test_vmin_s32
int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
+
// LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
@@ -1025,6 +1028,7 @@ int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
// ALL-LABEL: @test_vmin_u8
uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
+
// LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
// LLVM-NEXT: ret <8 x i8> [[VMIN_V_I]]
@@ -1034,6 +1038,7 @@ uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
// ALL-LABEL: @test_vmin_u16
uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
+
// LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
@@ -1047,6 +1052,7 @@ uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
// ALL-LABEL: @test_vmin_u32
uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
+
// LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
@@ -1060,6 +1066,7 @@ uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
// ALL-LABEL: @test_vminq_s8
int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
+
// LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
// LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
@@ -1069,6 +1076,7 @@ int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
// ALL-LABEL: @test_vminq_s16
int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
+
// LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
@@ -1082,6 +1090,7 @@ int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
// ALL-LABEL: @test_vminq_s32
int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+
// LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
@@ -1095,6 +1104,7 @@ int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
// ALL-LABEL: @test_vminq_u8
uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
+
// LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
// LLVM-NEXT: ret <16 x i8> [[VMINQ_V_I]]
@@ -1104,6 +1114,7 @@ uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
// ALL-LABEL: @test_vminq_u16
uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
+
// LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
@@ -1118,6 +1129,7 @@ uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
// ALL-LABEL: @test_vminq_u32
uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+
// LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
@@ -1131,6 +1143,7 @@ uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
// ALL-LABEL: @test_vmin_f32
float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
@@ -1146,6 +1159,7 @@ float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// ALL-LABEL: @test_vmin_f64
float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+
// LLVM: [[VMIN_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
// LLVM: ret <1 x double> [[VMIN_V2_I]]
return vmin_f64(v1, v2);
@@ -1154,6 +1168,7 @@ float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
// ALL-LABEL: @test_vminq_f32
float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
@@ -1169,6 +1184,7 @@ float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// ALL-LABEL: @test_vminq_f64
float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
@@ -1184,6 +1200,7 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// ALL-LABEL: @test_vminnm_f32
float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
@@ -1199,6 +1216,7 @@ float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// ALL-LABEL: @test_vminnm_f64
float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+
// LLVM: [[VMINNM_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
// LLVM: ret <1 x double> [[VMINNM_V2_I]]
return vminnm_f64(v1, v2);
@@ -1207,6 +1225,7 @@ float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
// ALL-LABEL: @test_vminnmq_f32
float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
@@ -1222,6 +1241,7 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// ALL-LABEL: @test_vminnmq_f64
float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
>From 7210fc2885ea4a30e5fe62b0da459d797eb787bb Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 08:21:20 -0700
Subject: [PATCH 14/15] Update LLVM and CIR labels for vmin and vminq functions
in NEON intrinsics
---
clang/test/CodeGen/AArch64/neon-intrinsics.c | 200 -------------------
clang/test/CodeGen/AArch64/neon/intrinsics.c | 60 ++++--
2 files changed, 40 insertions(+), 220 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index 8eb6cd86339d6..70eb4aef85641 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -5417,206 +5417,6 @@ float64x2_t test_vmaxq_f64(float64x2_t a, float64x2_t b) {
return vmaxq_f64(a, b);
}
-// CHECK-LABEL: define dso_local <8 x i8> @test_vmin_s8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[A]], <8 x i8> [[B]])
-// CHECK-NEXT: ret <8 x i8> [[VMIN_I]]
-//
-int8x8_t test_vmin_s8(int8x8_t a, int8x8_t b) {
- return vmin_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vmin_s16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
-// CHECK-NEXT: ret <4 x i16> [[VMIN2_I]]
-//
-int16x4_t test_vmin_s16(int16x4_t a, int16x4_t b) {
- return vmin_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vmin_s32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
-// CHECK-NEXT: ret <2 x i32> [[VMIN2_I]]
-//
-int32x2_t test_vmin_s32(int32x2_t a, int32x2_t b) {
- return vmin_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vmin_u8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[A]], <8 x i8> [[B]])
-// CHECK-NEXT: ret <8 x i8> [[VMIN_I]]
-//
-uint8x8_t test_vmin_u8(uint8x8_t a, uint8x8_t b) {
- return vmin_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vmin_u16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
-// CHECK-NEXT: ret <4 x i16> [[VMIN2_I]]
-//
-uint16x4_t test_vmin_u16(uint16x4_t a, uint16x4_t b) {
- return vmin_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vmin_u32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
-// CHECK-NEXT: ret <2 x i32> [[VMIN2_I]]
-//
-uint32x2_t test_vmin_u32(uint32x2_t a, uint32x2_t b) {
- return vmin_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x float> @test_vmin_f32(
-// CHECK-SAME: <2 x float> noundef [[A:%.*]], <2 x float> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x float> [[B]] to <2 x i32>
-// CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
-// CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
-// CHECK-NEXT: ret <2 x float> [[VMIN2_I]]
-//
-float32x2_t test_vmin_f32(float32x2_t a, float32x2_t b) {
- return vmin_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vminq_s8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
-// CHECK-NEXT: ret <16 x i8> [[VMIN_I]]
-//
-int8x16_t test_vminq_s8(int8x16_t a, int8x16_t b) {
- return vminq_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vminq_s16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
-// CHECK-NEXT: ret <8 x i16> [[VMIN2_I]]
-//
-int16x8_t test_vminq_s16(int16x8_t a, int16x8_t b) {
- return vminq_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vminq_s32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
-// CHECK-NEXT: ret <4 x i32> [[VMIN2_I]]
-//
-int32x4_t test_vminq_s32(int32x4_t a, int32x4_t b) {
- return vminq_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vminq_u8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
-// CHECK-NEXT: ret <16 x i8> [[VMIN_I]]
-//
-uint8x16_t test_vminq_u8(uint8x16_t a, uint8x16_t b) {
- return vminq_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vminq_u16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
-// CHECK-NEXT: ret <8 x i16> [[VMIN2_I]]
-//
-uint16x8_t test_vminq_u16(uint16x8_t a, uint16x8_t b) {
- return vminq_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vminq_u32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
-// CHECK-NEXT: ret <4 x i32> [[VMIN2_I]]
-//
-uint32x4_t test_vminq_u32(uint32x4_t a, uint32x4_t b) {
- return vminq_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x float> @test_vminq_f32(
-// CHECK-SAME: <4 x float> noundef [[A:%.*]], <4 x float> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[B]] to <4 x i32>
-// CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
-// CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
-// CHECK-NEXT: ret <4 x float> [[VMIN2_I]]
-//
-float32x4_t test_vminq_f32(float32x4_t a, float32x4_t b) {
- return vminq_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x double> @test_vminq_f64(
-// CHECK-SAME: <2 x double> noundef [[A:%.*]], <2 x double> noundef [[B:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[B]] to <2 x i64>
-// CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
-// CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
-// CHECK-NEXT: [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
-// CHECK-NEXT: [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double>
-// CHECK-NEXT: [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
-// CHECK-NEXT: ret <2 x double> [[VMIN2_I]]
-//
-float64x2_t test_vminq_f64(float64x2_t a, float64x2_t b) {
- return vminq_f64(a, b);
-}
-
// CHECK-LABEL: define dso_local <2 x float> @test_vmaxnm_f32(
// CHECK-SAME: <2 x float> noundef [[A:%.*]], <2 x float> noundef [[B:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 4349674a3b9e9..a8e94fd93e902 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -987,7 +987,8 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
// https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#minimum
//===----------------------------------------------------------------------===//
-// ALL-LABEL: @test_vmin_s8
+// LLVM-LABEL: @test_vmin_s8
+// CIR-LABEL: @vmin_s8(
int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
@@ -997,7 +998,8 @@ int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
return vmin_s8(v1, v2);
}
-// ALL-LABEL: @test_vmin_s16
+// LLVM-LABEL: @test_vmin_s16
+// CIR-LABEL: @vmin_s16(
int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
@@ -1011,7 +1013,8 @@ int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
return vmin_s16(v1, v2);
}
-// ALL-LABEL: @test_vmin_s32
+// LLVM-LABEL: @test_vmin_s32
+// CIR-LABEL: @vmin_s32(
int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
@@ -1025,7 +1028,8 @@ int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
return vmin_s32(v1, v2);
}
-// ALL-LABEL: @test_vmin_u8
+// LLVM-LABEL: @test_vmin_u8
+// CIR-LABEL: @vmin_u8(
uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
@@ -1035,7 +1039,8 @@ uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
return vmin_u8(v1, v2);
}
-// ALL-LABEL: @test_vmin_u16
+// LLVM-LABEL: @test_vmin_u16
+// CIR-LABEL: @vmin_u16(
uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
@@ -1049,7 +1054,8 @@ uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
return vmin_u16(v1, v2);
}
-// ALL-LABEL: @test_vmin_u32
+// LLVM-LABEL: @test_vmin_u32
+// CIR-LABEL: @vmin_u32(
uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
@@ -1063,7 +1069,8 @@ uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
return vmin_u32(v1, v2);
}
-// ALL-LABEL: @test_vminq_s8
+// LLVM-LABEL: @test_vminq_s8
+// CIR-LABEL: @vminq_s8(
int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
@@ -1073,7 +1080,8 @@ int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
return vminq_s8(v1, v2);
}
-// ALL-LABEL: @test_vminq_s16
+// LLVM-LABEL: @test_vminq_s16
+// CIR-LABEL: @vminq_s16(
int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
@@ -1087,7 +1095,8 @@ int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
return vminq_s16(v1, v2);
}
-// ALL-LABEL: @test_vminq_s32
+// LLVM-LABEL: @test_vminq_s32
+// CIR-LABEL: @vminq_s32(
int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
@@ -1101,7 +1110,8 @@ int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
return vminq_s32(v1, v2);
}
-// ALL-LABEL: @test_vminq_u8
+// LLVM-LABEL: @test_vminq_u8
+// CIR-LABEL: @vminq_u8(
uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
@@ -1111,7 +1121,8 @@ uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
return vminq_u8(v1, v2);
}
-// ALL-LABEL: @test_vminq_u16
+// LLVM-LABEL: @test_vminq_u16
+// CIR-LABEL: @vminq_u16(
uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
@@ -1126,7 +1137,8 @@ uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
return vminq_u16(v1, v2);
}
-// ALL-LABEL: @test_vminq_u32
+// LLVM-LABEL: @test_vminq_u32
+// CIR-LABEL: @vminq_u32(
uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
@@ -1140,7 +1152,8 @@ uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
return vminq_u32(v1, v2);
}
-// ALL-LABEL: @test_vmin_f32
+// LLVM-LABEL: @test_vmin_f32
+// CIR-LABEL: @vmin_f32(
float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
@@ -1156,7 +1169,8 @@ float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
return vmin_f32(v1, v2);
}
-// ALL-LABEL: @test_vmin_f64
+// LLVM-LABEL: @test_vmin_f64
+// CIR-LABEL: @vmin_f64(
float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
@@ -1165,7 +1179,8 @@ float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
return vmin_f64(v1, v2);
}
-// ALL-LABEL: @test_vminq_f32
+// LLVM-LABEL: @test_vminq_f32
+// CIR-LABEL: @vminq_f32(
float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
@@ -1181,7 +1196,8 @@ float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
return vminq_f32(v1, v2);
}
-// ALL-LABEL: @test_vminq_f64
+// LLVM-LABEL: @test_vminq_f64
+// CIR-LABEL: @vminq_f64(
float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
@@ -1197,7 +1213,8 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
return vminq_f64(v1, v2);
}
-// ALL-LABEL: @test_vminnm_f32
+// LLVM-LABEL: @test_vminnm_f32
+// CIR-LABEL: @vminnm_f32(
float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
@@ -1213,7 +1230,8 @@ float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
return vminnm_f32(v1, v2);
}
-// ALL-LABEL: @test_vminnm_f64
+// LLVM-LABEL: @test_vminnm_f64
+// CIR-LABEL: @vminnm_f64(
float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
@@ -1222,7 +1240,8 @@ float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
return vminnm_f64(v1, v2);
}
-// ALL-LABEL: @test_vminnmq_f32
+// LLVM-LABEL: @test_vminnmq_f32
+// CIR-LABEL: @vminnmq_f32(
float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
@@ -1238,7 +1257,8 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
return vminnmq_f32(v1, v2);
}
-// ALL-LABEL: @test_vminnmq_f64
+// LLVM-LABEL: @test_vminnmq_f64
+// CIR-LABEL: @vminnmq_f64(
float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
// CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
>From daa18d9ec52094f6624dd4389d9f5a292447f929 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <youngwisdm at gmail.com>
Date: Tue, 24 Mar 2026 08:41:19 -0700
Subject: [PATCH 15/15] Update CIR labels for vmin and vminq functions in NEON
intrinsics
---
clang/test/CodeGen/AArch64/neon/intrinsics.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index a8e94fd93e902..18ce9f6f54cf9 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1155,7 +1155,7 @@ uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
// LLVM-LABEL: @test_vmin_f32
// CIR-LABEL: @vmin_f32(
float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !cir.float>, !cir.vector<2 x !cir.float>) -> !cir.vector<2 x !cir.float>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
@@ -1172,7 +1172,7 @@ float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
// LLVM-LABEL: @test_vmin_f64
// CIR-LABEL: @vmin_f64(
float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !cir.double>, !cir.vector<1 x !cir.double>) -> !cir.vector<1 x !cir.double>
// LLVM: [[VMIN_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
// LLVM: ret <1 x double> [[VMIN_V2_I]]
@@ -1182,7 +1182,7 @@ float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
// LLVM-LABEL: @test_vminq_f32
// CIR-LABEL: @vminq_f32(
float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !cir.float>, !cir.vector<4 x !cir.float>) -> !cir.vector<4 x !cir.float>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
@@ -1199,7 +1199,7 @@ float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
// LLVM-LABEL: @test_vminq_f64
// CIR-LABEL: @vminq_f64(
float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !cir.double>, !cir.vector<2 x !cir.double>) -> !cir.vector<2 x !cir.double>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
@@ -1216,7 +1216,7 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
// LLVM-LABEL: @test_vminnm_f32
// CIR-LABEL: @vminnm_f32(
float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !cir.float>, !cir.vector<2 x !cir.float>) -> !cir.vector<2 x !cir.float>
// LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
@@ -1233,7 +1233,7 @@ float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
// LLVM-LABEL: @test_vminnm_f64
// CIR-LABEL: @vminnm_f64(
float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !cir.double>, !cir.vector<1 x !cir.double>) -> !cir.vector<1 x !cir.double>
// LLVM: [[VMINNM_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
// LLVM: ret <1 x double> [[VMINNM_V2_I]]
@@ -1243,7 +1243,7 @@ float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
// LLVM-LABEL: @test_vminnmq_f32
// CIR-LABEL: @vminnmq_f32(
float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !cir.float>, !cir.vector<4 x !cir.float>) -> !cir.vector<4 x !cir.float>
// LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
@@ -1260,7 +1260,7 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
// LLVM-LABEL: @test_vminnmq_f64
// CIR-LABEL: @vminnmq_f64(
float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
- // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !cir.double>, !cir.vector<2 x !cir.double>) -> !cir.vector<2 x !cir.double>
// LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
// LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
More information about the cfe-commits
mailing list