[llvm] [PowerPC] Support conversion between f16 and f128 (PR #97677)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 24 22:17:02 PDT 2024
https://github.com/EsmeYi updated https://github.com/llvm/llvm-project/pull/97677
From 2684fc18b7d5071a5709f8c92f5f23a9302e2327 Mon Sep 17 00:00:00 2001
From: esmeyi <esme.yi at ibm.com>
Date: Thu, 4 Jul 2024 01:05:39 -0400
Subject: [PATCH 1/2] F16 and f128 conversion.
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 6 ++
llvm/lib/Target/PowerPC/PPCInstrVSX.td | 8 ++
llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll | 102 ++++++++++++++++++
3 files changed, 116 insertions(+)
create mode 100644 llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 4d4008ac0ba70..360c463929b62 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -211,18 +211,24 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
}
if (Subtarget.isISA3_0()) {
+ setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Legal);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
+ setTruncStoreAction(MVT::f128, MVT::f16, Legal);
setTruncStoreAction(MVT::f64, MVT::f16, Legal);
setTruncStoreAction(MVT::f32, MVT::f16, Legal);
} else {
// No extending loads from f16 or HW conversions back and forth.
+ setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
+ setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
}
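
For reference, the legality hooks above govern extending loads from half to fp128 and truncating stores of fp128 to half. A minimal IR sketch of those forms (the function names are illustrative, not taken from the patch):

  define fp128 @load_ext_f16_to_f128(ptr %p) {
  entry:
    ; A load + fpext pair like this is what the EXTLOAD legality setting
    ; setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, ...) applies to
    ; once the combiner folds the pair into an extending load.
    %h = load half, ptr %p
    %q = fpext half %h to fp128
    ret fp128 %q
  }

  define void @store_trunc_f128_to_f16(fp128 %a, ptr %p) {
  entry:
    ; Likewise, fptrunc + store folds into a truncating store, which is
    ; what setTruncStoreAction(MVT::f128, MVT::f16, ...) controls.
    %h = fptrunc fp128 %a to half
    store half %h, ptr %p
    ret void
  }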
diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
index dd07892794d59..51aa0be7439c6 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -3993,6 +3993,10 @@ defm : ScalToVecWPermute<
(SUBREG_TO_REG (i64 1), (VEXTSH2Ds (LXSIHZX ForceXForm:$src)), sub_64)>;
// Load/convert and convert/store patterns for f16.
+def : Pat<(f128 (extloadf16 ForceXForm:$src)),
+ (f128 (XSCVDPQP (XSCVHPDP (LXSIHZX ForceXForm:$src))))>;
+def : Pat<(truncstoref16 f128:$src, ForceXForm:$dst),
+ (STXSIHX (XSCVDPHP (XSCVQPDP $src)), ForceXForm:$dst)>;
def : Pat<(f64 (extloadf16 ForceXForm:$src)),
(f64 (XSCVHPDP (LXSIHZX ForceXForm:$src)))>;
def : Pat<(truncstoref16 f64:$src, ForceXForm:$dst),
@@ -4001,6 +4005,8 @@ def : Pat<(f32 (extloadf16 ForceXForm:$src)),
(f32 (COPY_TO_REGCLASS (XSCVHPDP (LXSIHZX ForceXForm:$src)), VSSRC))>;
def : Pat<(truncstoref16 f32:$src, ForceXForm:$dst),
(STXSIHX (XSCVDPHP (COPY_TO_REGCLASS $src, VSFRC)), ForceXForm:$dst)>;
+def : Pat<(f128 (f16_to_fp i32:$A)),
+ (f128 (XSCVDPQP (XSCVHPDP (MTVSRWZ $A))))>;
def : Pat<(f64 (f16_to_fp i32:$A)),
(f64 (XSCVHPDP (MTVSRWZ $A)))>;
def : Pat<(f32 (f16_to_fp i32:$A)),
@@ -4008,6 +4014,8 @@ def : Pat<(f32 (f16_to_fp i32:$A)),
def : Pat<(i32 (fp_to_f16 f32:$A)),
(i32 (MFVSRWZ (XSCVDPHP (COPY_TO_REGCLASS $A, VSFRC))))>;
def : Pat<(i32 (fp_to_f16 f64:$A)), (i32 (MFVSRWZ (XSCVDPHP $A)))>;
+def : Pat<(i32 (fp_to_f16 f128:$A)),
+ (i32 (MFVSRWZ (XSCVDPHP (XSCVQPDP $A))))>;
// Vector sign extensions
def : Pat<(f64 (PPCVexts f64:$A, 1)),
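
Reading the new patterns together, every f128 <-> f16 conversion is routed through f64 using the existing half-precision converts; roughly, the selected sequences are:

  f128 extload of f16:     lxsihzx  -> xscvhpdp -> xscvdpqp
  f16 truncstore of f128:  xscvqpdp -> xscvdphp -> stxsihx
  f16_to_fp to f128:       mtvsrwz  -> xscvhpdp -> xscvdpqp
  fp_to_f16 from f128:     xscvqpdp -> xscvdphp -> mfvsrwz

The widening direction is exact (every f16 value is representable in f64 and f128), while the narrowing direction rounds twice; the second patch below drops the narrowing patterns in favor of the direct library conversion.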
diff --git a/llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll b/llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll
new file mode 100644
index 0000000000000..4f1e7da09b820
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown \
+; RUN: -verify-machineinstrs -ppc-asm-full-reg-names < %s | FileCheck %s \
+; RUN: --check-prefix=P8
+; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
+; RUN: -verify-machineinstrs -ppc-asm-full-reg-names < %s | FileCheck %s
+; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown -mattr=-hard-float \
+; RUN: -verify-machineinstrs -ppc-asm-full-reg-names < %s | FileCheck %s \
+; RUN: --check-prefix=SOFT
+
+define half @trunc(fp128 %a) unnamed_addr {
+; P8-LABEL: trunc:
+; P8: # %bb.0: # %entry
+; P8-NEXT: mflr r0
+; P8-NEXT: stdu r1, -32(r1)
+; P8-NEXT: std r0, 48(r1)
+; P8-NEXT: .cfi_def_cfa_offset 32
+; P8-NEXT: .cfi_offset lr, 16
+; P8-NEXT: bl __trunctfhf2
+; P8-NEXT: nop
+; P8-NEXT: clrldi r3, r3, 48
+; P8-NEXT: bl __gnu_h2f_ieee
+; P8-NEXT: nop
+; P8-NEXT: addi r1, r1, 32
+; P8-NEXT: ld r0, 16(r1)
+; P8-NEXT: mtlr r0
+; P8-NEXT: blr
+;
+; CHECK-LABEL: trunc:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xscvqpdp v2, v2
+; CHECK-NEXT: xscvdphp f0, vs34
+; CHECK-NEXT: mffprwz r3, f0
+; CHECK-NEXT: clrlwi r3, r3, 16
+; CHECK-NEXT: mtfprwz f0, r3
+; CHECK-NEXT: xscvhpdp f1, f0
+; CHECK-NEXT: blr
+;
+; SOFT-LABEL: trunc:
+; SOFT: # %bb.0: # %entry
+; SOFT-NEXT: mflr r0
+; SOFT-NEXT: stdu r1, -32(r1)
+; SOFT-NEXT: std r0, 48(r1)
+; SOFT-NEXT: .cfi_def_cfa_offset 32
+; SOFT-NEXT: .cfi_offset lr, 16
+; SOFT-NEXT: bl __trunctfhf2
+; SOFT-NEXT: nop
+; SOFT-NEXT: clrldi r3, r3, 48
+; SOFT-NEXT: bl __gnu_h2f_ieee
+; SOFT-NEXT: nop
+; SOFT-NEXT: bl __gnu_f2h_ieee
+; SOFT-NEXT: nop
+; SOFT-NEXT: addi r1, r1, 32
+; SOFT-NEXT: ld r0, 16(r1)
+; SOFT-NEXT: mtlr r0
+; SOFT-NEXT: blr
+entry:
+ %0 = fptrunc fp128 %a to half
+ ret half %0
+}
+
+define fp128 @ext(half %a) unnamed_addr {
+; P8-LABEL: ext:
+; P8: # %bb.0: # %entry
+; P8-NEXT: mflr r0
+; P8-NEXT: stdu r1, -32(r1)
+; P8-NEXT: std r0, 48(r1)
+; P8-NEXT: .cfi_def_cfa_offset 32
+; P8-NEXT: .cfi_offset lr, 16
+; P8-NEXT: bl __extendsfkf2
+; P8-NEXT: nop
+; P8-NEXT: addi r1, r1, 32
+; P8-NEXT: ld r0, 16(r1)
+; P8-NEXT: mtlr r0
+; P8-NEXT: blr
+;
+; CHECK-LABEL: ext:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xscpsgndp vs34, f1, f1
+; CHECK-NEXT: xscvdpqp v2, v2
+; CHECK-NEXT: blr
+;
+; SOFT-LABEL: ext:
+; SOFT: # %bb.0: # %entry
+; SOFT-NEXT: mflr r0
+; SOFT-NEXT: stdu r1, -32(r1)
+; SOFT-NEXT: std r0, 48(r1)
+; SOFT-NEXT: .cfi_def_cfa_offset 32
+; SOFT-NEXT: .cfi_offset lr, 16
+; SOFT-NEXT: clrldi r3, r3, 48
+; SOFT-NEXT: bl __gnu_h2f_ieee
+; SOFT-NEXT: nop
+; SOFT-NEXT: bl __extendsfkf2
+; SOFT-NEXT: nop
+; SOFT-NEXT: addi r1, r1, 32
+; SOFT-NEXT: ld r0, 16(r1)
+; SOFT-NEXT: mtlr r0
+; SOFT-NEXT: blr
+entry:
+ %0 = fpext half %a to fp128
+ ret fp128 %0
+}
From 0a819fe4d475977537e00c91823983d5a8b5f3d0 Mon Sep 17 00:00:00 2001
From: esmeyi <esme.yi at ibm.com>
Date: Thu, 25 Jul 2024 01:11:05 -0400
Subject: [PATCH 2/2] For truncation from f128 to f16, always use the lib
 function (direct conversion) instead of HW instructions (stepwise
conversion).
---
llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 6 +++---
llvm/lib/Target/PowerPC/PPCInstrVSX.td | 4 ----
llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll | 13 ++++++++++---
3 files changed, 13 insertions(+), 10 deletions(-)
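
The stepwise sequence narrows f128 to f64 and then to f16, which rounds twice; a direct one-step conversion (the __trunctfhf2 libcall used in the updated test below) can give a different result. A worked example, constructed here for illustration:

  x = 1 + 2^-11 + 2^-100                  (exactly representable in f128)
  f128 -> f64:  rounds to 1 + 2^-11       (the 2^-100 bit is below f64 precision)
  f64  -> f16:  1 + 2^-11 is an exact tie; ties-to-even gives 1.0
  f128 -> f16 in one step: x lies above the tie, so it rounds up to 1 + 2^-10

The stepwise path returns 1.0 while the direct conversion returns 1.0009765625, which is presumably why the truncation side now always goes through the library call.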
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 360c463929b62..18fe4748d0d38 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -210,25 +210,25 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
}
+ setTruncStoreAction(MVT::f128, MVT::f16, Expand);
+ setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
+
if (Subtarget.isISA3_0()) {
setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Legal);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
- setTruncStoreAction(MVT::f128, MVT::f16, Legal);
setTruncStoreAction(MVT::f64, MVT::f16, Legal);
setTruncStoreAction(MVT::f32, MVT::f16, Legal);
} else {
// No extending loads from f16 or HW conversions back and forth.
setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
- setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
- setTruncStoreAction(MVT::f128, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
}
diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
index 51aa0be7439c6..f42df59577c5b 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -3995,8 +3995,6 @@ defm : ScalToVecWPermute<
// Load/convert and convert/store patterns for f16.
def : Pat<(f128 (extloadf16 ForceXForm:$src)),
(f128 (XSCVDPQP (XSCVHPDP (LXSIHZX ForceXForm:$src))))>;
-def : Pat<(truncstoref16 f128:$src, ForceXForm:$dst),
- (STXSIHX (XSCVDPHP (XSCVQPDP $src)), ForceXForm:$dst)>;
def : Pat<(f64 (extloadf16 ForceXForm:$src)),
(f64 (XSCVHPDP (LXSIHZX ForceXForm:$src)))>;
def : Pat<(truncstoref16 f64:$src, ForceXForm:$dst),
@@ -4014,8 +4012,6 @@ def : Pat<(f32 (f16_to_fp i32:$A)),
def : Pat<(i32 (fp_to_f16 f32:$A)),
(i32 (MFVSRWZ (XSCVDPHP (COPY_TO_REGCLASS $A, VSFRC))))>;
def : Pat<(i32 (fp_to_f16 f64:$A)), (i32 (MFVSRWZ (XSCVDPHP $A)))>;
-def : Pat<(i32 (fp_to_f16 f128:$A)),
- (i32 (MFVSRWZ (XSCVDPHP (XSCVQPDP $A))))>;
// Vector sign extensions
def : Pat<(f64 (PPCVexts f64:$A, 1)),
diff --git a/llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll b/llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll
index 4f1e7da09b820..43e08627012e9 100644
--- a/llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll
+++ b/llvm/test/CodeGen/PowerPC/f16-to-from-f128.ll
@@ -28,12 +28,19 @@ define half @trunc(fp128 %a) unnamed_addr {
;
; CHECK-LABEL: trunc:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xscvqpdp v2, v2
-; CHECK-NEXT: xscvdphp f0, vs34
-; CHECK-NEXT: mffprwz r3, f0
+; CHECK-NEXT: mflr r0
+; CHECK-NEXT: stdu r1, -32(r1)
+; CHECK-NEXT: std r0, 48(r1)
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset lr, 16
+; CHECK-NEXT: bl __trunctfhf2
+; CHECK-NEXT: nop
; CHECK-NEXT: clrlwi r3, r3, 16
; CHECK-NEXT: mtfprwz f0, r3
; CHECK-NEXT: xscvhpdp f1, f0
+; CHECK-NEXT: addi r1, r1, 32
+; CHECK-NEXT: ld r0, 16(r1)
+; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
;
; SOFT-LABEL: trunc: