[llvm-branch-commits] [llvm] cff417c - [FPEnv][AArch64] Add lowering of f128 STRICT_FSETCC

Hans Wennborg via llvm-branch-commits <llvm-branch-commits at lists.llvm.org>
Tue Feb 18 07:55:21 PST 2020


Author: John Brawn
Date: 2020-02-18T16:46:42+01:00
New Revision: cff417cffd61d2c4607eb142e272ec10ebec2c21

URL: https://github.com/llvm/llvm-project/commit/cff417cffd61d2c4607eb142e272ec10ebec2c21
DIFF: https://github.com/llvm/llvm-project/commit/cff417cffd61d2c4607eb142e272ec10ebec2c21.diff

LOG: [FPEnv][AArch64] Add lowering of f128 STRICT_FSETCC

These get lowered to library calls (__lttf2, __eqtf2, etc.), like the non-strict versions.

Differential Revision: https://reviews.llvm.org/D73784

(cherry picked from commit 68cf574857c81f711f498a479855a17e7bea40f7)
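For context, a minimal standalone sketch of the IR this change enables, mirroring the
fcmp_olt_f128 test added below (the function name and the use of a bare strictfp
attribute in place of the tests' #0 attribute group are illustrative assumptions):

    declare i1 @llvm.experimental.constrained.fcmp.f128(fp128, fp128, metadata, metadata)

    ; A constrained (strict) f128 comparison. Before this patch the resulting
    ; STRICT_FSETCC node had no f128 lowering; now it is expanded to a
    ; soft-float comparison libcall, as for the non-strict case.
    define i32 @example_fcmp_olt_f128(fp128 %a, fp128 %b) strictfp {
      %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
      %conv = zext i1 %cmp to i32
      ret i32 %conv
    }

Compiled with llc for an AArch64 triple (e.g. llc -mtriple=aarch64), the "olt" comparison
should come out as "bl __lttf2", which is what the CHECK lines in the test below verify.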

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/fp-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 94b6d1ac7638..23f05eaad944 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -272,6 +272,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::FSUB, MVT::f128, Custom);
   setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
   setOperationAction(ISD::SETCC, MVT::f128, Custom);
+  setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
+  setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
   setOperationAction(ISD::BR_CC, MVT::f128, Custom);
   setOperationAction(ISD::SELECT, MVT::f128, Custom);
   setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
@@ -5244,7 +5246,6 @@ SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
   }
 
   if (LHS.getValueType().isInteger()) {
-    assert(!IsStrict && "Unexpected integer in strict fp comparison!");
     SDValue CCVal;
     SDValue Cmp = getAArch64Cmp(
         LHS, RHS, ISD::getSetCCInverse(CC, LHS.getValueType()), CCVal, DAG, dl);
@@ -5252,7 +5253,8 @@ SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
     // Note that we inverted the condition above, so we reverse the order of
     // the true and false operands here.  This will allow the setcc to be
     // matched to a single CSINC instruction.
-    return DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
+    SDValue Res = DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
+    return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
   }
 
   // Now we know we're dealing with FP values.

diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index d3e17dcf5ff9..3c412a5f7e0e 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -1197,7 +1197,197 @@ define fp128 @trunc_f128(fp128 %x) #0 {
   ret fp128 %val
 }
 
-; TODO: fcmp (missing STRICT_FSETCC handling)
+; CHECK-LABEL: fcmp_olt_f128:
+; CHECK: bl __lttf2
+define i32 @fcmp_olt_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_ole_f128:
+; CHECK: bl __letf2
+define i32 @fcmp_ole_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_ogt_f128:
+; CHECK: bl __gttf2
+define i32 @fcmp_ogt_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_oge_f128:
+; CHECK: bl __getf2
+define i32 @fcmp_oge_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_oeq_f128:
+; CHECK: bl __eqtf2
+define i32 @fcmp_oeq_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_one_f128:
+; CHECK: bl __eqtf2
+define i32 @fcmp_one_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_ult_f128:
+; CHECK: bl __getf2
+define i32 @fcmp_ult_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_ule_f128:
+; CHECK: bl __gttf2
+define i32 @fcmp_ule_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_ugt_f128:
+; CHECK: bl __letf2
+define i32 @fcmp_ugt_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_uge_f128:
+; CHECK: bl __lttf2
+define i32 @fcmp_uge_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_ueq_f128:
+; CHECK: bl __eqtf2
+define i32 @fcmp_ueq_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmp_une_f128:
+; CHECK: bl __netf2
+define i32 @fcmp_une_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_olt_f128:
+; CHECK: bl __lttf2
+define i32 @fcmps_olt_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_ole_f128:
+; CHECK: bl __letf2
+define i32 @fcmps_ole_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ole", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_ogt_f128:
+; CHECK: bl __gttf2
+define i32 @fcmps_ogt_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ogt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_oge_f128:
+; CHECK: bl __getf2
+define i32 @fcmps_oge_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_oeq_f128:
+; CHECK: bl __eqtf2
+define i32 @fcmps_oeq_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oeq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_one_f128:
+; CHECK: bl __eqtf2
+define i32 @fcmps_one_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"one", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_ult_f128:
+; CHECK: bl __getf2
+define i32 @fcmps_ult_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_ule_f128:
+; CHECK: bl __gttf2
+define i32 @fcmps_ule_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ule", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_ugt_f128:
+; CHECK: bl __letf2
+define i32 @fcmps_ugt_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ugt", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_uge_f128:
+; CHECK: bl __lttf2
+define i32 @fcmps_uge_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_ueq_f128:
+; CHECK: bl __eqtf2
+define i32 @fcmps_ueq_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ueq", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: fcmps_une_f128:
+; CHECK: bl __netf2
+define i32 @fcmps_une_f128(fp128 %a, fp128 %b) #0 {
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"une", metadata !"fpexcept.strict") #0
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+}
 
 
 ; Intrinsics to convert between floating-point types

