[llvm] [AArch64][SDAG] Add f16 -> i16 rounding NEON conversion intrinsics (PR #155851)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 28 08:10:34 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-aarch64
Author: Kajetan Puchalski (mrkajetanp)
<details>
<summary>Changes</summary>
Add dedicated .i16.f16 formats for rounding NEON conversion intrinsics in order to avoid issues with incorrect overflow behaviour caused by using .i32.f16 formats to perform the same conversions.
Added intrinsic formats:
i16 @llvm.aarch64.neon.fcvtzu.i16.f16(half)
i16 @llvm.aarch64.neon.fcvtzs.i16.f16(half)
i16 @llvm.aarch64.neon.fcvtas.i16.f16(half)
i16 @llvm.aarch64.neon.fcvtms.i16.f16(half)
i16 @llvm.aarch64.neon.fcvtns.i16.f16(half)
i16 @llvm.aarch64.neon.fcvtps.i16.f16(half)
---
Full diff: https://github.com/llvm/llvm-project/pull/155851.diff
4 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+38)
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.h (+2)
- (modified) llvm/lib/Target/AArch64/AArch64InstrInfo.td (+8)
- (added) llvm/test/CodeGen/AArch64/fp16_i16_intrinsic_scalar.ll (+80)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 23328ed57fb36..c135e0acafc49 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1290,6 +1290,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
+
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
} else {
// when AArch64 doesn't have fullfp16 support, promote the input
// to i32 first.
@@ -27802,6 +27804,18 @@ void AArch64TargetLowering::ReplaceExtractSubVectorResults(
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Half));
}
+void AArch64TargetLowering::ReplaceFcvtFpToI16Intrinsic(
+ SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG,
+ unsigned Opcode) const {
+ if (N->getValueType(0).getScalarType() != MVT::i16)
+ return;
+
+ SDLoc DL(N);
+ SDValue CVT = DAG.getNode(Opcode, DL, MVT::f32, N->getOperand(1));
+ SDValue Bitcast = DAG.getBitcast(MVT::i32, CVT);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Bitcast));
+}
+
void AArch64TargetLowering::ReplaceGetActiveLaneMaskResults(
SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
assert((Subtarget->hasSVE2p1() ||
@@ -28292,6 +28306,30 @@ void AArch64TargetLowering::ReplaceNodeResults(
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
return;
}
+ case Intrinsic::aarch64_neon_fcvtzs: {
+ ReplaceFcvtFpToI16Intrinsic(N, Results, DAG, AArch64ISD::FCVTZS_HALF);
+ return;
+ }
+ case Intrinsic::aarch64_neon_fcvtzu: {
+ ReplaceFcvtFpToI16Intrinsic(N, Results, DAG, AArch64ISD::FCVTZU_HALF);
+ return;
+ }
+ case Intrinsic::aarch64_neon_fcvtas: {
+ ReplaceFcvtFpToI16Intrinsic(N, Results, DAG, AArch64ISD::FCVTAS_HALF);
+ return;
+ }
+ case Intrinsic::aarch64_neon_fcvtms: {
+ ReplaceFcvtFpToI16Intrinsic(N, Results, DAG, AArch64ISD::FCVTMS_HALF);
+ return;
+ }
+ case Intrinsic::aarch64_neon_fcvtns: {
+ ReplaceFcvtFpToI16Intrinsic(N, Results, DAG, AArch64ISD::FCVTNS_HALF);
+ return;
+ }
+ case Intrinsic::aarch64_neon_fcvtps: {
+ ReplaceFcvtFpToI16Intrinsic(N, Results, DAG, AArch64ISD::FCVTPS_HALF);
+ return;
+ }
}
}
case ISD::READ_REGISTER: {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 46738365080f9..1805875688ddb 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -850,6 +850,8 @@ class AArch64TargetLowering : public TargetLowering {
void ReplaceExtractSubVectorResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const;
+ void ReplaceFcvtFpToI16Intrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG, unsigned Opcode) const;
void ReplaceGetActiveLaneMaskResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 07c07008c0e05..24f9a6d00eac0 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -991,6 +991,10 @@ def AArch64fcvtxnv: PatFrags<(ops node:$Rn),
def AArch64fcvtzs_half : SDNode<"AArch64ISD::FCVTZS_HALF", SDTFPExtendOp>;
def AArch64fcvtzu_half : SDNode<"AArch64ISD::FCVTZU_HALF", SDTFPExtendOp>;
+def AArch64fcvtas_half : SDNode<"AArch64ISD::FCVTAS_HALF", SDTFPExtendOp>;
+def AArch64fcvtms_half : SDNode<"AArch64ISD::FCVTMS_HALF", SDTFPExtendOp>;
+def AArch64fcvtns_half : SDNode<"AArch64ISD::FCVTNS_HALF", SDTFPExtendOp>;
+def AArch64fcvtps_half : SDNode<"AArch64ISD::FCVTPS_HALF", SDTFPExtendOp>;
//def Aarch64softf32tobf16v8: SDNode<"AArch64ISD::", SDTFPRoundOp>;
@@ -6550,6 +6554,10 @@ class F16ToI16ScalarPat<SDNode cvt_isd, BaseSIMDTwoScalar instr>
let Predicates = [HasFullFP16] in {
def : F16ToI16ScalarPat<AArch64fcvtzs_half, FCVTZSv1f16>;
def : F16ToI16ScalarPat<AArch64fcvtzu_half, FCVTZUv1f16>;
+def : F16ToI16ScalarPat<AArch64fcvtas_half, FCVTASv1f16>;
+def : F16ToI16ScalarPat<AArch64fcvtms_half, FCVTMSv1f16>;
+def : F16ToI16ScalarPat<AArch64fcvtns_half, FCVTNSv1f16>;
+def : F16ToI16ScalarPat<AArch64fcvtps_half, FCVTPSv1f16>;
}
// Round FP64 to BF16.
diff --git a/llvm/test/CodeGen/AArch64/fp16_i16_intrinsic_scalar.ll b/llvm/test/CodeGen/AArch64/fp16_i16_intrinsic_scalar.ll
new file mode 100644
index 0000000000000..30bc80821ed80
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fp16_i16_intrinsic_scalar.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -mattr=+v8.2a,+fullfp16 | FileCheck %s
+
+; Test f16 -> i16 NEON intrinsics, currently only supported in SDAG.
+; Should be merged with fp16_intrinsic_scalar_1op.ll once there is
+; support in GISel.
+
+declare i16 @llvm.aarch64.neon.fcvtzu.i16.f16(half)
+declare i16 @llvm.aarch64.neon.fcvtzs.i16.f16(half)
+declare i16 @llvm.aarch64.neon.fcvtas.i16.f16(half)
+declare i16 @llvm.aarch64.neon.fcvtms.i16.f16(half)
+declare i16 @llvm.aarch64.neon.fcvtns.i16.f16(half)
+declare i16 @llvm.aarch64.neon.fcvtps.i16.f16(half)
+
+
+define i16 @fcvtzu_intrinsic_i16(half %a) {
+; CHECK-LABEL: fcvtzu_intrinsic_i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzu h0, h0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %fcvt = tail call i16 @llvm.aarch64.neon.fcvtzu.i16.f16(half %a)
+ ret i16 %fcvt
+}
+
+define i16 @fcvtzs_intrinsic_i16(half %a) {
+; CHECK-LABEL: fcvtzs_intrinsic_i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtzs h0, h0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %fcvt = tail call i16 @llvm.aarch64.neon.fcvtzs.i16.f16(half %a)
+ ret i16 %fcvt
+}
+
+define i16 @fcvtas_intrinsic_i16(half %a) {
+; CHECK-LABEL: fcvtas_intrinsic_i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtas h0, h0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %fcvt = tail call i16 @llvm.aarch64.neon.fcvtas.i16.f16(half %a)
+ ret i16 %fcvt
+}
+
+define i16 @fcvtms_intrinsic_i16(half %a) {
+; CHECK-LABEL: fcvtms_intrinsic_i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtms h0, h0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %fcvt = tail call i16 @llvm.aarch64.neon.fcvtms.i16.f16(half %a)
+ ret i16 %fcvt
+}
+
+define i16 @fcvtns_intrinsic_i16(half %a) {
+; CHECK-LABEL: fcvtns_intrinsic_i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtns h0, h0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %fcvt = tail call i16 @llvm.aarch64.neon.fcvtns.i16.f16(half %a)
+ ret i16 %fcvt
+}
+
+define i16 @fcvtps_intrinsic_i16(half %a) {
+; CHECK-LABEL: fcvtps_intrinsic_i16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fcvtps h0, h0
+; CHECK-NEXT: fmov w0, s0
+; CHECK-NEXT: ret
+entry:
+ %fcvt = tail call i16 @llvm.aarch64.neon.fcvtps.i16.f16(half %a)
+ ret i16 %fcvt
+}
``````````
</details>
https://github.com/llvm/llvm-project/pull/155851
More information about the llvm-commits
mailing list