[llvm] r330034 - [ARM] FP16 vmaxnm/vminnm scalar instructions
Sjoerd Meijer via llvm-commits
llvm-commits at lists.llvm.org
Fri Apr 13 08:34:26 PDT 2018
Author: sjoerdmeijer
Date: Fri Apr 13 08:34:26 2018
New Revision: 330034
URL: http://llvm.org/viewvc/llvm-project?rev=330034&view=rev
Log:
[ARM] FP16 vmaxnm/vminnm scalar instructions
This adds code generation support for the FP16 vmaxnm/vminnm scalar
instructions.
Differential Revision: https://reviews.llvm.org/D44675
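As a minimal sketch of what this enables (the function name is illustrative and not part of the commit; it assumes a +fullfp16 target and the i16-bitcast argument trick used by the added tests), an fcmp+select minimum on half values is now expected to select the scalar vminnm.f16 instruction when fast-math flags allow it:

define half @sketch_fp16_min(i16 signext %a, i16 signext %b) {
entry:
  %x = bitcast i16 %a to half
  %y = bitcast i16 %b to half
  %cmp = fcmp fast olt half %x, %y          ; ordered less-than, fast-math
  %min = select i1 %cmp, half %x, half %y   ; min(x, y)
  ret half %min                             ; expected: vminnm.f16
}

Without fast-math flags the NaN-propagating semantics of the compare/select must be preserved, which is what the added fp16-vminmaxnm-safe.ll test checks with its CHECK-NOT lines.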
Added:
llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm.ll
Modified:
llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
llvm/trunk/lib/Target/ARM/ARMInstrVFP.td
llvm/trunk/test/CodeGen/ARM/fp16-instructions.ll
Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=330034&r1=330033&r2=330034&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Fri Apr 13 08:34:26 2018
@@ -527,6 +527,9 @@ ARMTargetLowering::ARMTargetLowering(con
setOperationAction(ISD::BITCAST, MVT::i16, Custom);
setOperationAction(ISD::BITCAST, MVT::i32, Custom);
setOperationAction(ISD::BITCAST, MVT::f16, Custom);
+
+ setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
+ setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
}
for (MVT VT : MVT::vector_valuetypes()) {
@@ -1147,6 +1150,8 @@ ARMTargetLowering::ARMTargetLowering(con
if (Subtarget->hasNEON()) {
// vmin and vmax aren't available in a scalar form, so we use
// a NEON instruction with an undef lane instead.
+ setOperationAction(ISD::FMINNAN, MVT::f16, Legal);
+ setOperationAction(ISD::FMAXNAN, MVT::f16, Legal);
setOperationAction(ISD::FMINNAN, MVT::f32, Legal);
setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
setOperationAction(ISD::FMINNAN, MVT::v2f32, Legal);
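For context: FMINNUM/FMAXNUM carry the IEEE-754 minNum/maxNum semantics implemented by vminnm/vmaxnm, whereas FMINNAN/FMAXNAN are the NaN-propagating forms that are mapped onto the NEON vmin/vmax d-register instructions via the undef-lane trick described in the comment above. A hedged sketch, not taken from the commit's tests: the llvm.minnum.f16 intrinsic exercises the FMINNUM node directly and, with f16 now marked Legal, should likewise select vminnm.f16 on a +fullfp16 target (function name illustrative):

declare half @llvm.minnum.f16(half, half)

define half @sketch_fp16_minnum(i16 signext %a, i16 signext %b) {
entry:
  %x = bitcast i16 %a to half
  %y = bitcast i16 %b to half
  ; minNum ignores a quiet NaN operand, matching vminnm semantics
  %r = call half @llvm.minnum.f16(half %x, half %y)
  ret half %r
}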
Modified: llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrNEON.td?rev=330034&r1=330033&r2=330034&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrNEON.td Fri Apr 13 08:34:26 2018
@@ -6870,6 +6870,17 @@ class N3VSPat<SDNode OpNode, NeonI Inst>
(v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
SPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
+class N3VSPatFP16<SDNode OpNode, NeonI Inst>
+ : NEONFPPat<(f16 (OpNode HPR:$a, HPR:$b)),
+ (EXTRACT_SUBREG
+ (v4f16 (COPY_TO_REGCLASS (Inst
+ (INSERT_SUBREG
+ (v4f16 (COPY_TO_REGCLASS (v4f16 (IMPLICIT_DEF)), DPR_VFP2)),
+ HPR:$a, ssub_0),
+ (INSERT_SUBREG
+ (v4f16 (COPY_TO_REGCLASS (v4f16 (IMPLICIT_DEF)), DPR_VFP2)),
+ HPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
+
class N3VSMulOpPat<SDNode MulNode, SDNode OpNode, NeonI Inst>
: NEONFPPat<(f32 (OpNode SPR:$acc, (f32 (MulNode SPR:$a, SPR:$b)))),
(EXTRACT_SUBREG
@@ -6912,6 +6923,8 @@ def : N3VSMulOpPat<fmul, fsub, VFMSfd>,
Requires<[HasVFP4, UseNEONForFP, UseFusedMAC]>;
def : N2VSPat<fabs, VABSfd>;
def : N2VSPat<fneg, VNEGfd>;
+def : N3VSPatFP16<fmaxnan, VMAXhd>, Requires<[HasFullFP16]>;
+def : N3VSPatFP16<fminnan, VMINhd>, Requires<[HasFullFP16]>;
def : N3VSPat<fmaxnan, VMAXfd>, Requires<[HasNEON]>;
def : N3VSPat<fminnan, VMINfd>, Requires<[HasNEON]>;
def : NVCVTFIPat<fp_to_sint, VCVTf2sd>;
Modified: llvm/trunk/lib/Target/ARM/ARMInstrVFP.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrVFP.td?rev=330034&r1=330033&r2=330034&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrVFP.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrVFP.td Fri Apr 13 08:34:26 2018
@@ -482,9 +482,9 @@ defm VSELVS : vsel_inst<"vs", 0b01, 6>;
multiclass vmaxmin_inst<string op, bit opc, SDNode SD> {
let DecoderNamespace = "VFPV8", PostEncoderMethod = "" in {
def H : AHbInp<0b11101, 0b00, opc,
- (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
+ (outs HPR:$Sd), (ins HPR:$Sn, HPR:$Sm),
NoItinerary, !strconcat(op, ".f16\t$Sd, $Sn, $Sm"),
- []>,
+ [(set HPR:$Sd, (SD HPR:$Sn, HPR:$Sm))]>,
Requires<[HasFullFP16]>;
def S : ASbInp<0b11101, 0b00, opc,
Modified: llvm/trunk/test/CodeGen/ARM/fp16-instructions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fp16-instructions.ll?rev=330034&r1=330033&r2=330034&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fp16-instructions.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fp16-instructions.ll Fri Apr 13 08:34:26 2018
@@ -28,6 +28,10 @@
; RUN: llc < %s -mtriple=arm-none-eabihf -mattr=+fullfp16 -fp-contract=fast | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16-FAST
; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mattr=+fullfp16 -fp-contract=fast | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16-FAST
+; TODO: we can't pass half-precision arguments as "half" types yet. For the
+; time being this is worked around by passing "float %f.coerce" plus the
+; necessary bitconverts/truncates. Once half arguments are supported, these
+; tests should pass and check "half" directly.
define float @RetValBug(float %A.coerce) {
entry:
@@ -477,9 +481,10 @@ entry:
; CHECK-HARDFP-FULLFP16-FAST-NEXT: vmov.f32 s0, s2
}
-; TODO:
; 17. VMAXNM
; 18. VMINNM
+; Tested in fp16-vminmaxnm.ll and fp16-vminmaxnm-safe.ll
+
; 19. VMLA
define float @VMLA(float %a.coerce, float %b.coerce, float %c.coerce) {
Added: llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll?rev=330034&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm-safe.ll Fri Apr 13 08:34:26 2018
@@ -0,0 +1,366 @@
+; RUN: llc < %s -mtriple=armv8-eabi -mattr=+fullfp16 | FileCheck %s
+; RUN: llc < %s -mtriple thumbv7a -mattr=+fullfp16 | FileCheck %s
+
+; TODO: we can't pass half-precision arguments as "half" types yet. For the
+; time being this is worked around by passing "float %f.coerce" plus the
+; necessary bitconverts/truncates. In these tests we pass an i16 and use a
+; single bitconvert, which is the shortest way to obtain a half value. Once
+; half arguments are supported, these tests should use them directly.
+
+define half @fp16_vminnm_o(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_o:
+; CHECK-NOT: vminnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp olt half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vminnm_o_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_o_rev:
+; CHECK-NOT: vminnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp ogt half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vminnm_u(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_u:
+; CHECK-NOT: vminnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp ult half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vminnm_ule(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_ule:
+; CHECK-NOT: vminnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp ule half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vminnm_u_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_u_rev:
+; CHECK-NOT: vminnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp ugt half %0, %1
+ %cond = select i1 %cmp, half %1, half %0
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_o(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_o:
+; CHECK-NOT: vmaxnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp ogt half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_oge(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_oge:
+; CHECK-NOT: vmaxnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp oge half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_o_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_o_rev:
+; CHECK-NOT: vmaxnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp olt half %0, %1
+ %cond = select i1 %cmp, half %1, half %0
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_ole_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_ole_rev:
+; CHECK-NOT: vmaxnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp ole half %0, %1
+ %cond = select i1 %cmp, half %1, half %0
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_u(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_u:
+; CHECK-NOT: vmaxnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp ugt half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_uge(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_uge:
+; CHECK-NOT: vmaxnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp uge half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_u_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_u_rev:
+; CHECK-NOT: vmaxnm.f16
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp ult half %0, %1
+ %cond = select i1 %cmp, half %1, half %0
+ ret half %cond
+}
+
+; known non-NaNs
+
+define half @fp16_vminnm_NNNo(i16 signext %a) {
+; CHECK-LABEL: fp16_vminnm_NNNo:
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
+; CHECK: vmov.f16 [[S4:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s2, [[S4]], [[S2]]
+; CHECK: vmin.f16 d0, d1, d0
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp olt half %0, 12.
+ %cond1 = select i1 %cmp1, half %0, half 12.
+ %cmp2 = fcmp olt half 34., %cond1
+ %cond2 = select i1 %cmp2, half 34., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vminnm_NNNo_rev(i16 signext %a) {
+; CHECK-LABEL: fp16_vminnm_NNNo_rev:
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmin.f16 d0, d1, d0
+; CHECK: vldr.16 [[S2:s[0-9]]], .LCPI{{.*}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp ogt half %0, 56.
+ %cond1 = select i1 %cmp1, half 56., half %0
+ %cmp2 = fcmp ogt half 78., %cond1
+ %cond2 = select i1 %cmp2, half %cond1, half 78.
+ ret half %cond2
+}
+
+define half @fp16_vminnm_NNNu(i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_NNNu:
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
+; CHECK: vmov.f16 [[S4:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s2, [[S4]], [[S2]]
+; CHECK: vmin.f16 d0, d1, d0
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp ult half 12., %0
+ %cond1 = select i1 %cmp1, half 12., half %0
+ %cmp2 = fcmp ult half %cond1, 34.
+ %cond2 = select i1 %cmp2, half %cond1, half 34.
+ ret half %cond2
+}
+
+define half @fp16_vminnm_NNNule(i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_NNNule:
+; CHECK: vldr.16 [[S2:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S4:s[0-9]]], r{{.}}
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vminnm.f16 s2, [[S4]], [[S2]]
+; CHECK: vmin.f16 d0, d1, d0
+
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp ule half 34., %0
+ %cond1 = select i1 %cmp1, half 34., half %0
+ %cmp2 = fcmp ule half %cond1, 56.
+ %cond2 = select i1 %cmp2, half %cond1, half 56.
+ ret half %cond2
+}
+
+define half @fp16_vminnm_NNNu_rev(i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_NNNu_rev:
+
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmin.f16 d0, d1, d0
+; CHECK: vldr.16 [[S2:s[0-9]]], .LCPI{{.*}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp ugt half 56., %0
+ %cond1 = select i1 %cmp1, half %0, half 56.
+ %cmp2 = fcmp ugt half %cond1, 78.
+ %cond2 = select i1 %cmp2, half 78., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNo(i16 signext %a) {
+; CHECK-LABEL: fp16_vmaxnm_NNNo:
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
+; CHECK: vmov.f16 [[S4:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s2, [[S4]], [[S2]]
+; CHECK: vmax.f16 d0, d1, d0
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp ogt half %0, 12.
+ %cond1 = select i1 %cmp1, half %0, half 12.
+ %cmp2 = fcmp ogt half 34., %cond1
+ %cond2 = select i1 %cmp2, half 34., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNoge(i16 signext %a) {
+; CHECK-LABEL: fp16_vmaxnm_NNNoge:
+; CHECK: vldr.16 [[S2:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S4:s[0-9]]], r{{.}}
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s2, [[S4]], [[S2]]
+; CHECK: vmax.f16 d0, d1, d0
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp oge half %0, 34.
+ %cond1 = select i1 %cmp1, half %0, half 34.
+ %cmp2 = fcmp oge half 56., %cond1
+ %cond2 = select i1 %cmp2, half 56., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNo_rev(i16 signext %a) {
+; CHECK-LABEL: fp16_vmaxnm_NNNo_rev:
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmax.f16 d0, d1, d0
+; CHECK: vldr.16 [[S2:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp olt half %0, 56.
+ %cond1 = select i1 %cmp1, half 56., half %0
+ %cmp2 = fcmp olt half 78., %cond1
+ %cond2 = select i1 %cmp2, half %cond1, half 78.
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNole_rev(i16 signext %a) {
+; CHECK-LABEL: fp16_vmaxnm_NNNole_rev:
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmax.f16 d0, d1, d0
+; CHECK: vldr.16 [[S2:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp ole half %0, 78.
+ %cond1 = select i1 %cmp1, half 78., half %0
+ %cmp2 = fcmp ole half 90., %cond1
+ %cond2 = select i1 %cmp2, half %cond1, half 90.
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNu(i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_NNNu:
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
+; CHECK: vmov.f16 [[S4:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s2, [[S4]], [[S2]]
+; CHECK: vmax.f16 d0, d1, d0
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp ugt half 12., %0
+ %cond1 = select i1 %cmp1, half 12., half %0
+ %cmp2 = fcmp ugt half %cond1, 34.
+ %cond2 = select i1 %cmp2, half %cond1, half 34.
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNuge(i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_NNNuge:
+; CHECK: vldr.16 [[S2:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S4:s[0-9]]], r{{.}}
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s2, [[S4]], [[S2]]
+; CHECK: vmax.f16 d0, d1, d0
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp uge half 34., %0
+ %cond1 = select i1 %cmp1, half 34., half %0
+ %cmp2 = fcmp uge half %cond1, 56.
+ %cond2 = select i1 %cmp2, half %cond1, half 56.
+ ret half %cond2
+}
+
+define half @fp16_vminmaxnm_neg0(i16 signext %a) {
+; CHECK-LABEL: fp16_vminmaxnm_neg0:
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s2, [[S2]], [[S0]]
+; CHECK: vmax.f16 d0, d1, d0
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp olt half %0, -0.
+ %cond1 = select i1 %cmp1, half %0, half -0.
+ %cmp2 = fcmp ugt half %cond1, -0.
+ %cond2 = select i1 %cmp2, half %cond1, half -0.
+ ret half %cond2
+}
+
+define half @fp16_vminmaxnm_e_0(i16 signext %a) {
+; CHECK-LABEL: fp16_vminmaxnm_e_0:
+; CHECK: vldr.16 [[S2:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmin.f16 d0, d0, d1
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp nsz ole half 0., %0
+ %cond1 = select i1 %cmp1, half 0., half %0
+ %cmp2 = fcmp nsz uge half 0., %cond1
+ %cond2 = select i1 %cmp2, half 0., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vminmaxnm_e_neg0(i16 signext %a) {
+; CHECK-LABEL: fp16_vminmaxnm_e_neg0:
+; CHECK: vldr.16 [[S0:s[0-9]]], .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s2, [[S2]], [[S0]]
+; CHECK: vmax.f16 d0, d1, d0
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp nsz ule half -0., %0
+ %cond1 = select i1 %cmp1, half -0., half %0
+ %cmp2 = fcmp nsz oge half -0., %cond1
+ %cond2 = select i1 %cmp2, half -0., half %cond1
+ ret half %cond2
+}
Added: llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm.ll?rev=330034&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/fp16-vminmaxnm.ll Fri Apr 13 08:34:26 2018
@@ -0,0 +1,418 @@
+; RUN: llc < %s -mtriple=arm-eabi -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple thumbv7a -mattr=+fullfp16 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s
+
+; TODO: we can't pass half-precision arguments as "half" types yet. For the
+; time being this is worked around by passing "float %f.coerce" plus the
+; necessary bitconverts/truncates. In these tests we pass an i16 and use a
+; single bitconvert, which is the shortest way to obtain a half value. Once
+; half arguments are supported, these tests should use them directly.
+
+define half @fp16_vminnm_o(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_o:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast olt half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vminnm_o_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_o_rev:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast ogt half %0, %1
+ %cond = select i1 %cmp, half %1, half %0
+ ret half %cond
+}
+
+define half @fp16_vminnm_u(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_u:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast ult half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vminnm_ule(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_ule:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast ule half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vminnm_u_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_u_rev:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast ugt half %0, %1
+ %cond = select i1 %cmp, half %1, half %0
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_o(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_o:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast ogt half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_oge(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_oge:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast oge half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_o_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_o_rev:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast olt half %0, %1
+ %cond = select i1 %cmp, half %1, half %0
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_ole_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_ole_rev:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast ole half %0, %1
+ %cond = select i1 %cmp, half %1, half %0
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_u(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_u:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast ugt half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_uge(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_uge:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast uge half %0, %1
+ %cond = select i1 %cmp, half %0, half %1
+ ret half %cond
+}
+
+define half @fp16_vmaxnm_u_rev(i16 signext %a, i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_u_rev:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %1 = bitcast i16 %b to half
+ %cmp = fcmp fast ult half %0, %1
+ %cond = select i1 %cmp, half %1, half %0
+ ret half %cond
+}
+
+; known non-NaNs
+
+define half @fp16_vminnm_NNNo(i16 signext %a) {
+; CHECK-LABEL: fp16_vminnm_NNNo:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast olt half %0, 12.
+ %cond1 = select i1 %cmp1, half %0, half 12.
+ %cmp2 = fcmp fast olt half 34., %cond1
+ %cond2 = select i1 %cmp2, half 34., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vminnm_NNNo_rev(i16 signext %a) {
+; CHECK-LABEL: fp16_vminnm_NNNo_rev:
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast ogt half %0, 56.
+ %cond1 = select i1 %cmp1, half 56., half %0
+ %cmp2 = fcmp fast ogt half 78., %cond1
+ %cond2 = select i1 %cmp2, half %cond1, half 78.
+ ret half %cond2
+}
+
+define half @fp16_vminnm_NNNu(i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_NNNu:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp fast ult half 12., %0
+ %cond1 = select i1 %cmp1, half 12., half %0
+ %cmp2 = fcmp fast ult half %cond1, 34.
+ %cond2 = select i1 %cmp2, half %cond1, half 34.
+ ret half %cond2
+}
+
+define half @fp16_vminnm_NNNule(i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_NNNule:
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp fast ule half 34., %0
+ %cond1 = select i1 %cmp1, half 34., half %0
+ %cmp2 = fcmp fast ule half %cond1, 56.
+ %cond2 = select i1 %cmp2, half %cond1, half 56.
+ ret half %cond2
+}
+
+define half @fp16_vminnm_NNNu_rev(i16 signext %b) {
+; CHECK-LABEL: fp16_vminnm_NNNu_rev:
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vminnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp fast ugt half 56., %0
+ %cond1 = select i1 %cmp1, half %0, half 56.
+ %cmp2 = fcmp fast ugt half %cond1, 78.
+ %cond2 = select i1 %cmp2, half 78., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNo(i16 signext %a) {
+; CHECK-LABEL: fp16_vmaxnm_NNNo:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast ogt half %0, 12.
+ %cond1 = select i1 %cmp1, half %0, half 12.
+ %cmp2 = fcmp fast ogt half 34., %cond1
+ %cond2 = select i1 %cmp2, half 34., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNoge(i16 signext %a) {
+; CHECK-LABEL: fp16_vmaxnm_NNNoge:
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast oge half %0, 34.
+ %cond1 = select i1 %cmp1, half %0, half 34.
+ %cmp2 = fcmp fast oge half 56., %cond1
+ %cond2 = select i1 %cmp2, half 56., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNo_rev(i16 signext %a) {
+; CHECK-LABEL: fp16_vmaxnm_NNNo_rev:
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast olt half %0, 56.
+ %cond1 = select i1 %cmp1, half 56., half %0
+ %cmp2 = fcmp fast olt half 78., %cond1
+ %cond2 = select i1 %cmp2, half %cond1, half 78.
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNole_rev(i16 signext %a) {
+; CHECK-LABEL: fp16_vmaxnm_NNNole_rev:
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast ole half %0, 78.
+ %cond1 = select i1 %cmp1, half 78., half %0
+ %cmp2 = fcmp fast ole half 90., %cond1
+ %cond2 = select i1 %cmp2, half %cond1, half 90.
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNu(i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_NNNu:
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], #1.200000e+01
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp fast ugt half 12., %0
+ %cond1 = select i1 %cmp1, half 12., half %0
+ %cmp2 = fcmp fast ugt half %cond1, 34.
+ %cond2 = select i1 %cmp2, half %cond1, half 34.
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNuge(i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_NNNuge:
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp fast uge half 34., %0
+ %cond1 = select i1 %cmp1, half 34., half %0
+ %cmp2 = fcmp fast uge half %cond1, 56.
+ %cond2 = select i1 %cmp2, half %cond1, half 56.
+ ret half %cond2
+}
+
+define half @fp16_vmaxnm_NNNu_rev(i16 signext %b) {
+; CHECK-LABEL: fp16_vmaxnm_NNNu_rev:
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S0:s[0-9]]], r{{.}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+; CHECK: vldr.16 s2, .LCPI{{.*}}
+; CHECK: vmaxnm.f16 s0, [[S0]], [[S2]]
+entry:
+ %0 = bitcast i16 %b to half
+ %cmp1 = fcmp fast ult half 56., %0
+ %cond1 = select i1 %cmp1, half %0, half 56.
+ %cmp2 = fcmp fast ult half %cond1, 78.
+ %cond2 = select i1 %cmp2, half 78., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vminmaxnm_0(i16 signext %a) {
+; CHECK-LABEL: fp16_vminmaxnm_0:
+; CHECK: vldr.16 s0, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s2, s2, s0
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast olt half %0, 0.
+ %cond1 = select i1 %cmp1, half %0, half 0.
+ %cmp2 = fcmp fast ogt half %cond1, 0.
+ %cond2 = select i1 %cmp2, half %cond1, half 0.
+ ret half %cond2
+}
+
+define half @fp16_vminmaxnm_neg0(i16 signext %a) {
+; CHECK-LABEL: fp16_vminmaxnm_neg0:
+; CHECK: vldr.16 s0, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s2, s2, s0
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast olt half %0, -0.
+ %cond1 = select i1 %cmp1, half %0, half -0.
+ %cmp2 = fcmp fast ugt half %cond1, -0.
+ %cond2 = select i1 %cmp2, half %cond1, half -0.
+ ret half %cond2
+}
+
+define half @fp16_vminmaxnm_e_0(i16 signext %a) {
+; CHECK-LABEL: fp16_vminmaxnm_e_0:
+; CHECK: vldr.16 s0, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s2, s2, s0
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast ule half 0., %0
+ %cond1 = select i1 %cmp1, half 0., half %0
+ %cmp2 = fcmp fast uge half 0., %cond1
+ %cond2 = select i1 %cmp2, half 0., half %cond1
+ ret half %cond2
+}
+
+define half @fp16_vminmaxnm_e_neg0(i16 signext %a) {
+; CHECK-LABEL: fp16_vminmaxnm_e_neg0:
+; CHECK: vldr.16 s0, .LCPI{{.*}}
+; CHECK: vmov.f16 [[S2:s[0-9]]], r{{.}}
+; CHECK: vminnm.f16 s2, s2, s0
+; CHECK: vmaxnm.f16 s0, [[S2]], [[S0]]
+entry:
+ %0 = bitcast i16 %a to half
+ %cmp1 = fcmp fast ule half -0., %0
+ %cond1 = select i1 %cmp1, half -0., half %0
+ %cmp2 = fcmp fast oge half -0., %cond1
+ %cond2 = select i1 %cmp2, half -0., half %cond1
+ ret half %cond2
+}