[clang] [CIR][AArch64] Upstream NEON Minimum (PR #187935)
via cfe-commits
cfe-commits at lists.llvm.org
Sun Mar 22 08:24:19 PDT 2026
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-clangir
Author: Zhihui Yang (YGGkk)
<details>
<summary>Changes</summary>
Part of https://github.com/llvm/llvm-project/issues/185382
Added intrinsics for:
- `vmin_*`
- `vminq_*`
- `vminnm_*`
- `vminnmq_*`
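
For context: `vmin`/`vminq` lower to FMIN, which propagates NaN, while `vminnm`/`vminnmq` lower to FMINNM, which follows IEEE 754 minNum semantics and prefers a number over a quiet NaN. A minimal demo of the difference (illustrative only, not part of the diff; assumes an AArch64 host):

```c
#include <arm_neon.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  float32x2_t a = {1.0f, NAN};
  float32x2_t b = {2.0f, 3.0f};
  float32x2_t m  = vmin_f32(a, b);   /* lane 1 is NaN: FMIN propagates NaN     */
  float32x2_t mn = vminnm_f32(a, b); /* lane 1 is 3.0: FMINNM picks the number */
  printf("vmin:   %f %f\n", vget_lane_f32(m, 0), vget_lane_f32(m, 1));
  printf("vminnm: %f %f\n", vget_lane_f32(mn, 0), vget_lane_f32(mn, 1));
  return 0;
}
```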
---
Full diff: https://github.com/llvm/llvm-project/pull/187935.diff
2 Files Affected:
- (modified) clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp (+14)
- (modified) clang/test/CodeGen/AArch64/neon/intrinsics.c (+167)
``````````diff
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 5d7b8d839fa84..140a6e893a394 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2873,8 +2873,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
case NEON::BI__builtin_neon_vmax_v:
case NEON::BI__builtin_neon_vmaxq_v:
case NEON::BI__builtin_neon_vmaxh_f16:
+ cgm.errorNYI(expr->getSourceRange(),
+ std::string("unimplemented AArch64 builtin call: ") +
+ getContext().BuiltinInfo.getName(builtinID));
+ return mlir::Value{};
case NEON::BI__builtin_neon_vmin_v:
case NEON::BI__builtin_neon_vminq_v:
+ intrName = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin";
+ if (cir::isFPOrVectorOfFPType(ty))
+ intrName = "aarch64.neon.fmin";
+ return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
case NEON::BI__builtin_neon_vminh_f16:
cgm.errorNYI(expr->getSourceRange(),
std::string("unimplemented AArch64 builtin call: ") +
@@ -2892,8 +2900,14 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
case NEON::BI__builtin_neon_vpminq_v:
case NEON::BI__builtin_neon_vpmax_v:
case NEON::BI__builtin_neon_vpmaxq_v:
+ cgm.errorNYI(expr->getSourceRange(),
+ std::string("unimplemented AArch64 builtin call: ") +
+ getContext().BuiltinInfo.getName(builtinID));
+ return mlir::Value{};
case NEON::BI__builtin_neon_vminnm_v:
case NEON::BI__builtin_neon_vminnmq_v:
+ intrName = "aarch64.neon.fminnm";
+ return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
case NEON::BI__builtin_neon_vminnmh_f16:
case NEON::BI__builtin_neon_vmaxnm_v:
case NEON::BI__builtin_neon_vmaxnmq_v:
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index bf8e62feda8da..fcc0b14c88695 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -982,3 +982,170 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
return (int64_t)vshld_u64(a, b);
}
+//===----------------------------------------------------------------------===//
+// 2.1.8 Minimum
+// https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#minimum
+//===----------------------------------------------------------------------===//
+
+// ALL-LABEL: @test_vmin_s8
+int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
+ // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+ // LLVM-NEXT: ret <8 x i8> [[VMIN_I]]
+ return vmin_s8(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_s16
+int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
+ // LLVM-NEXT: ret <4 x i16> [[VMIN_I]]
+ return vmin_s16(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_s32
+int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
+ // LLVM-NEXT: ret <2 x i32> [[VMIN_I]]
+ return vmin_s32(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_u8
+uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
+ // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+ // LLVM-NEXT: ret <8 x i8> [[VMIN_I]]
+ return vmin_u8(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_u16
+uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
+ // LLVM-NEXT: ret <4 x i16> [[VMIN_I]]
+ return vmin_u16(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_u32
+uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
+ // LLVM-NEXT: ret <2 x i32> [[VMIN_I]]
+ return vmin_u32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_s8
+int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
+ // LLVM: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+ // LLVM-NEXT: ret <16 x i8> [[VMIN_I]]
+ return vminq_s8(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_s16
+int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
+ // LLVM: [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
+ // LLVM-NEXT: ret <8 x i16> [[VMIN_I]]
+ return vminq_s16(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_s32
+int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
+ // LLVM-NEXT: ret <4 x i32> [[VMIN_I]]
+ return vminq_s32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_u8
+uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
+ // LLVM: [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+ // LLVM-NEXT: ret <16 x i8> [[VMIN_I]]
+ return vminq_u8(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_u16
+uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
+ // LLVM: [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
+ // LLVM-NEXT: ret <8 x i16> [[VMIN_I]]
+ return vminq_u16(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_u32
+uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
+ // LLVM-NEXT: ret <4 x i32> [[VMIN_I]]
+ return vminq_u32(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_f32
+float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
+ // LLVM-NEXT: ret <2 x float> [[VMIN_I]]
+ return vmin_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_f64
+float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
+ // LLVM-NEXT: ret <1 x double> [[VMIN_I]]
+ return vmin_f64(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_f32
+float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+ // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+ return vminq_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_f64
+float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+ // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+ return vminq_f64(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnm_f32
+float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
+ // LLVM-NEXT: ret <2 x float> [[VMIN_I]]
+ return vminnm_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnm_f64
+float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
+ // LLVM-NEXT: ret <1 x double> [[VMIN_I]]
+ return vminnm_f64(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnmq_f32
+float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+ // LLVM: [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+ // LLVM-NEXT: ret <4 x float> [[VMIN_I]]
+ return vminnmq_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnmq_f64
+float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
+ // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+ // LLVM: [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+ // LLVM-NEXT: ret <2 x double> [[VMIN_I]]
+ return vminnmq_f64(v1, v2);
+}
\ No newline at end of file
``````````
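
The lowering picks the intrinsic name from the element type, the same way classic codegen does: unsigned integer vectors get `umin`, signed ones get `smin`, and floating-point vectors get `fmin` (`fminnm` has no integer variant, so it is assigned unconditionally). A minimal sketch of that choice; `pick_min_intrinsic` is a hypothetical stand-in, not a CIR helper:

```c
/* Sketch of the intrinsic-name selection made in the diff above. */
const char *pick_min_intrinsic(int is_unsigned, int is_floating_point) {
  if (is_floating_point)
    return "aarch64.neon.fmin";             /* float/double element types */
  return is_unsigned ? "aarch64.neon.umin"  /* u8/u16/u32 elements */
                     : "aarch64.neon.smin"; /* s8/s16/s32 elements */
}
```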
</details>
https://github.com/llvm/llvm-project/pull/187935
More information about the cfe-commits mailing list