[llvm] 131b3b9 - [PowerPC] Support constrained scalar fptosi/fptoui

Qiu Chaofan via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 19 22:35:41 PDT 2020


Author: Qiu Chaofan
Date: 2020-08-20T13:29:43+08:00
New Revision: 131b3b9ed4efd11d2e50d2963fd11f5d7c7650f0

URL: https://github.com/llvm/llvm-project/commit/131b3b9ed4efd11d2e50d2963fd11f5d7c7650f0
DIFF: https://github.com/llvm/llvm-project/commit/131b3b9ed4efd11d2e50d2963fd11f5d7c7650f0.diff

LOG: [PowerPC] Support constrained scalar fptosi/fptoui

This patch adds support for constrained scalar fp-to-int operations on
PowerPC. In addition, it fixes the FP exception bit of the quad-precision
convert & truncate instructions.

Reviewed By: steven.zhang, uweigand

Differential Revision: https://reviews.llvm.org/D81537

Added: 
    llvm/test/CodeGen/PowerPC/fp-strict-conv-f128.ll
    llvm/test/CodeGen/PowerPC/fp-strict-conv.ll

Modified: 
    llvm/lib/Target/PowerPC/PPCISelLowering.cpp
    llvm/lib/Target/PowerPC/PPCISelLowering.h
    llvm/lib/Target/PowerPC/PPCInstr64Bit.td
    llvm/lib/Target/PowerPC/PPCInstrInfo.td
    llvm/lib/Target/PowerPC/PPCInstrVSX.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 986fe4c6493f..c2669b82cfdd 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -450,6 +450,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
     setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
   } else {
     // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
+    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
 
     // PowerPC does not have [U|S]INT_TO_FP
@@ -582,12 +583,15 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
 
   if (Subtarget.has64BitSupport()) {
     // They also have instructions for converting between i64 and fp.
+    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
     setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
     setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
     setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
     setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
     // This is just the low 32 bits of a (signed) fp->i64 conversion.
     // We cannot do this with Promote because i64 is not a legal type.
+    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
 
     if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
@@ -597,19 +601,25 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
     if (Subtarget.hasSPE()) {
       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
-    } else
+    } else {
+      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
+    }
   }
 
   // With the instructions enabled under FPCVT, we can do everything.
   if (Subtarget.hasFPCVT()) {
     if (Subtarget.has64BitSupport()) {
+      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
+      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
       setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
       setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
       setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
       setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
     }
 
+    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
+    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
     setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
@@ -1464,6 +1474,14 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
+  case PPCISD::STRICT_FCTIDZ:
+    return "PPCISD::STRICT_FCTIDZ";
+  case PPCISD::STRICT_FCTIWZ:
+    return "PPCISD::STRICT_FCTIWZ";
+  case PPCISD::STRICT_FCTIDUZ:
+    return "PPCISD::STRICT_FCTIDUZ";
+  case PPCISD::STRICT_FCTIWUZ:
+    return "PPCISD::STRICT_FCTIWUZ";
   }
   return nullptr;
 }
@@ -7938,28 +7956,57 @@ SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
   return Op;
 }
 
+static unsigned getPPCStrictOpcode(unsigned Opc) {
+  switch (Opc) {
+  default:
+    llvm_unreachable("No strict version of this opcode!");
+  case PPCISD::FCTIDZ:
+    return PPCISD::STRICT_FCTIDZ;
+  case PPCISD::FCTIWZ:
+    return PPCISD::STRICT_FCTIWZ;
+  case PPCISD::FCTIDUZ:
+    return PPCISD::STRICT_FCTIDUZ;
+  case PPCISD::FCTIWUZ:
+    return PPCISD::STRICT_FCTIWUZ;
+  }
+}
+
 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
                               const PPCSubtarget &Subtarget) {
   SDLoc dl(Op);
-  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
-  SDValue Src = Op.getOperand(0);
+  bool IsStrict = Op->isStrictFPOpcode();
+  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
+                  Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
+  // For strict nodes, source is the second operand.
+  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
+  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
   assert(Src.getValueType().isFloatingPoint());
-  if (Src.getValueType() == MVT::f32)
-    Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
+  if (Src.getValueType() == MVT::f32) {
+    if (IsStrict) {
+      Src = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::f64, MVT::Other},
+                        {Chain, Src});
+      Chain = Src.getValue(1);
+    } else
+      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
+  }
   SDValue Conv;
+  unsigned Opc = ISD::DELETED_NODE;
   switch (Op.getSimpleValueType().SimpleTy) {
   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
   case MVT::i32:
-    Conv = DAG.getNode(
-        IsSigned ? PPCISD::FCTIWZ
-                 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
-        dl, MVT::f64, Src);
+    Opc = IsSigned ? PPCISD::FCTIWZ
+                   : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
     break;
   case MVT::i64:
     assert((IsSigned || Subtarget.hasFPCVT()) &&
            "i64 FP_TO_UINT is supported only with FPCVT");
-    Conv = DAG.getNode(IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ, dl,
-                       MVT::f64, Src);
+    Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
+  }
+  if (IsStrict) {
+    Opc = getPPCStrictOpcode(Opc);
+    Conv = DAG.getNode(Opc, dl, {MVT::f64, MVT::Other}, {Chain, Src});
+  } else {
+    Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
   }
   return Conv;
 }
@@ -7968,7 +8015,9 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                                SelectionDAG &DAG,
                                                const SDLoc &dl) const {
   SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
-  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
+  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
+                  Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
+  bool IsStrict = Op->isStrictFPOpcode();
 
   // Convert the FP value to an int value through memory.
   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
@@ -7979,18 +8028,18 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
 
   // Emit a store to the stack slot.
-  SDValue Chain;
+  SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
   if (i32Stack) {
     MachineFunction &MF = DAG.getMachineFunction();
     Alignment = Align(4);
     MachineMemOperand *MMO =
         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
-    SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
+    SDValue Ops[] = { Chain, Tmp, FIPtr };
     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
   } else
-    Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment);
+    Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
 
   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
   // add in a bias on big endian.
@@ -8012,23 +8061,29 @@ void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
                                                     SelectionDAG &DAG,
                                                     const SDLoc &dl) const {
-  assert(Op.getOperand(0).getValueType().isFloatingPoint());
-  return DAG.getNode(PPCISD::MFVSR, dl, Op.getSimpleValueType().SimpleTy,
-                     convertFPToInt(Op, DAG, Subtarget));
+  SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
+  SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
+  if (Op->isStrictFPOpcode())
+    return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
+  else
+    return Mov;
 }
 
 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                           const SDLoc &dl) const {
-  SDValue Src = Op.getOperand(0);
+  bool IsStrict = Op->isStrictFPOpcode();
+  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
+                  Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
+  SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
   // FP to INT conversions are legal for f128.
   if (Src.getValueType() == MVT::f128)
     return Op;
 
   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
   // PPC (the libcall is not available).
-  if (Src.getValueType() == MVT::ppcf128) {
+  if (Src.getValueType() == MVT::ppcf128 && !IsStrict) {
     if (Op.getValueType() == MVT::i32) {
-      if (Op.getOpcode() == ISD::FP_TO_SINT) {
+      if (IsSigned) {
         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
                                  DAG.getIntPtrConstant(0, dl));
         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
@@ -8039,8 +8094,7 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
 
         // Now use a smaller FP_TO_SINT.
         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
-      }
-      if (Op.getOpcode() == ISD::FP_TO_UINT) {
+      } else {
         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
         SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
@@ -10458,6 +10512,8 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   case ISD::STORE:              return LowerSTORE(Op, DAG);
   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
+  case ISD::STRICT_FP_TO_UINT:
+  case ISD::STRICT_FP_TO_SINT:
   case ISD::FP_TO_UINT:
   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
   case ISD::UINT_TO_FP:
@@ -10548,10 +10604,13 @@ void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
     }
     return;
   }
+  case ISD::STRICT_FP_TO_SINT:
+  case ISD::STRICT_FP_TO_UINT:
   case ISD::FP_TO_SINT:
   case ISD::FP_TO_UINT:
     // LowerFP_TO_INT() can only handle f32 and f64.
-    if (N->getOperand(0).getValueType() == MVT::ppcf128)
+    if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
+        MVT::ppcf128)
       return;
     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
     return;

diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 7e9915c04b6a..df66f16ad346 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -436,6 +436,12 @@ namespace llvm {
     /// PLD.
     MAT_PCREL_ADDR,
 
+    // Constrained conversion from floating point to int
+    STRICT_FCTIDZ = ISD::FIRST_TARGET_STRICTFP_OPCODE,
+    STRICT_FCTIWZ,
+    STRICT_FCTIDUZ,
+    STRICT_FCTIWUZ,
+
     /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
     /// byte-swapping store instruction.  It byte-swaps the low "Type" bits of
     /// the GPRC input, then stores it through Ptr.  Type can be either i16 or

diff --git a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
index 6956c40a70be..d473dbbca3d7 100644
--- a/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
+++ b/llvm/lib/Target/PowerPC/PPCInstr64Bit.td
@@ -1457,7 +1457,7 @@ defm FCTIDU : XForm_26r<63, 942, (outs f8rc:$frD), (ins f8rc:$frB),
                         []>, isPPC64;
 defm FCTIDZ : XForm_26r<63, 815, (outs f8rc:$frD), (ins f8rc:$frB),
                         "fctidz", "$frD, $frB", IIC_FPGeneral,
-                        [(set f64:$frD, (PPCfctidz f64:$frB))]>, isPPC64;
+                        [(set f64:$frD, (PPCany_fctidz f64:$frB))]>, isPPC64;
 
 defm FCFIDU  : XForm_26r<63, 974, (outs f8rc:$frD), (ins f8rc:$frB),
                         "fcfidu", "$frD, $frB", IIC_FPGeneral,
@@ -1470,10 +1470,10 @@ defm FCFIDUS : XForm_26r<59, 974, (outs f4rc:$frD), (ins f8rc:$frB),
                         [(set f32:$frD, (PPCfcfidus f64:$frB))]>, isPPC64;
 defm FCTIDUZ : XForm_26r<63, 943, (outs f8rc:$frD), (ins f8rc:$frB),
                         "fctiduz", "$frD, $frB", IIC_FPGeneral,
-                        [(set f64:$frD, (PPCfctiduz f64:$frB))]>, isPPC64;
+                        [(set f64:$frD, (PPCany_fctiduz f64:$frB))]>, isPPC64;
 defm FCTIWUZ : XForm_26r<63, 143, (outs f8rc:$frD), (ins f8rc:$frB),
                         "fctiwuz", "$frD, $frB", IIC_FPGeneral,
-                        [(set f64:$frD, (PPCfctiwuz f64:$frB))]>, isPPC64;
+                        [(set f64:$frD, (PPCany_fctiwuz f64:$frB))]>, isPPC64;
 }
 
 

diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index fa6f911f83ad..07dd1b68f9bc 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -215,6 +215,28 @@ def PPCfnmsub     : SDNode<"PPCISD::FNMSUB"    , SDTFPTernaryOp>;
 
 def PPCextswsli : SDNode<"PPCISD::EXTSWSLI" , SDT_PPCextswsli>;
 
+def PPCstrict_fctidz : SDNode<"PPCISD::STRICT_FCTIDZ",
+                              SDTFPUnaryOp, [SDNPHasChain]>;
+def PPCstrict_fctiwz : SDNode<"PPCISD::STRICT_FCTIWZ",
+                              SDTFPUnaryOp, [SDNPHasChain]>;
+def PPCstrict_fctiduz : SDNode<"PPCISD::STRICT_FCTIDUZ",
+                               SDTFPUnaryOp, [SDNPHasChain]>;
+def PPCstrict_fctiwuz : SDNode<"PPCISD::STRICT_FCTIWUZ",
+                                SDTFPUnaryOp, [SDNPHasChain]>;
+
+def PPCany_fctidz : PatFrags<(ops node:$op),
+                             [(PPCstrict_fctidz node:$op),
+                              (PPCfctidz node:$op)]>;
+def PPCany_fctiwz : PatFrags<(ops node:$op),
+                             [(PPCstrict_fctiwz node:$op),
+                              (PPCfctiwz node:$op)]>;
+def PPCany_fctiduz : PatFrags<(ops node:$op),
+                              [(PPCstrict_fctiduz node:$op),
+                               (PPCfctiduz node:$op)]>;
+def PPCany_fctiwuz : PatFrags<(ops node:$op),
+                              [(PPCstrict_fctiwuz node:$op),
+                               (PPCfctiwuz node:$op)]>;
+
 // Move 2 i64 values into a VSX register
 def PPCbuild_fp128: SDNode<"PPCISD::BUILD_FP128",
                            SDTypeProfile<1, 2,
@@ -2632,7 +2654,7 @@ let Uses = [RM], mayRaiseFPException = 1, hasSideEffects = 0 in {
                           []>;
   defm FCTIWZ : XForm_26r<63, 15, (outs f8rc:$frD), (ins f8rc:$frB),
                           "fctiwz", "$frD, $frB", IIC_FPGeneral,
-                          [(set f64:$frD, (PPCfctiwz f64:$frB))]>;
+                          [(set f64:$frD, (PPCany_fctiwz f64:$frB))]>;
 
   defm FRSP   : XForm_26r<63, 12, (outs f4rc:$frD), (ins f8rc:$frB),
                           "frsp", "$frD, $frB", IIC_FPGeneral,

diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
index 272f8b1c0aac..3a83b8b98af5 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -769,39 +769,39 @@ let hasSideEffects = 0 in {
   def XSCVDPSXDS : XX2Form<60, 344,
                       (outs vsfrc:$XT), (ins vsfrc:$XB),
                       "xscvdpsxds $XT, $XB", IIC_VecFP,
-                      [(set f64:$XT, (PPCfctidz f64:$XB))]>;
+                      [(set f64:$XT, (PPCany_fctidz f64:$XB))]>;
   let isCodeGenOnly = 1 in
   def XSCVDPSXDSs : XX2Form<60, 344,
                       (outs vssrc:$XT), (ins vssrc:$XB),
                       "xscvdpsxds $XT, $XB", IIC_VecFP,
-                      [(set f32:$XT, (PPCfctidz f32:$XB))]>;
+                      [(set f32:$XT, (PPCany_fctidz f32:$XB))]>;
   def XSCVDPSXWS : XX2Form<60, 88,
                       (outs vsfrc:$XT), (ins vsfrc:$XB),
                       "xscvdpsxws $XT, $XB", IIC_VecFP,
-                      [(set f64:$XT, (PPCfctiwz f64:$XB))]>;
+                      [(set f64:$XT, (PPCany_fctiwz f64:$XB))]>;
   let isCodeGenOnly = 1 in
   def XSCVDPSXWSs : XX2Form<60, 88,
                       (outs vssrc:$XT), (ins vssrc:$XB),
                       "xscvdpsxws $XT, $XB", IIC_VecFP,
-                      [(set f32:$XT, (PPCfctiwz f32:$XB))]>;
+                      [(set f32:$XT, (PPCany_fctiwz f32:$XB))]>;
   def XSCVDPUXDS : XX2Form<60, 328,
                       (outs vsfrc:$XT), (ins vsfrc:$XB),
                       "xscvdpuxds $XT, $XB", IIC_VecFP,
-                      [(set f64:$XT, (PPCfctiduz f64:$XB))]>;
+                      [(set f64:$XT, (PPCany_fctiduz f64:$XB))]>;
   let isCodeGenOnly = 1 in
   def XSCVDPUXDSs : XX2Form<60, 328,
                       (outs vssrc:$XT), (ins vssrc:$XB),
                       "xscvdpuxds $XT, $XB", IIC_VecFP,
-                      [(set f32:$XT, (PPCfctiduz f32:$XB))]>;
+                      [(set f32:$XT, (PPCany_fctiduz f32:$XB))]>;
   def XSCVDPUXWS : XX2Form<60, 72,
                       (outs vsfrc:$XT), (ins vsfrc:$XB),
                       "xscvdpuxws $XT, $XB", IIC_VecFP,
-                      [(set f64:$XT, (PPCfctiwuz f64:$XB))]>;
+                      [(set f64:$XT, (PPCany_fctiwuz f64:$XB))]>;
   let isCodeGenOnly = 1 in
   def XSCVDPUXWSs : XX2Form<60, 72,
                       (outs vssrc:$XT), (ins vssrc:$XB),
                       "xscvdpuxws $XT, $XB", IIC_VecFP,
-                      [(set f32:$XT, (PPCfctiwuz f32:$XB))]>;
+                      [(set f32:$XT, (PPCany_fctiwuz f32:$XB))]>;
   def XSCVSPDP : XX2Form<60, 329,
                       (outs vsfrc:$XT), (ins vsfrc:$XB),
                       "xscvspdp $XT, $XB", IIC_VecFP, []>;
@@ -1479,9 +1479,8 @@ let Predicates = [HasVSX, HasP9Vector] in {
                                           f128:$vB))]>;
   }
 
-  // FIXME: Setting the hasSideEffects flag here to match current behaviour.
   // Truncate & Convert QP -> (Un)Signed (D)Word (dword[1] is set to zero)
-  let hasSideEffects = 1 in {
+  let mayRaiseFPException = 1 in {
     def XSCVQPSDZ : X_VT5_XO5_VB5<63, 25, 836, "xscvqpsdz", []>;
     def XSCVQPSWZ : X_VT5_XO5_VB5<63,  9, 836, "xscvqpswz", []>;
     def XSCVQPUDZ : X_VT5_XO5_VB5<63, 17, 836, "xscvqpudz", []>;
@@ -3764,11 +3763,11 @@ def : Pat<(f128 (uint_to_fp ScalarLoads.ZELi8)),
           (f128 (XSCVUDQP (LXSIBZX xoaddr:$src)))>;
 
 // Truncate & Convert QP -> (Un)Signed (D)Word.
-def : Pat<(i64 (fp_to_sint f128:$src)), (i64 (MFVRD (XSCVQPSDZ $src)))>;
-def : Pat<(i64 (fp_to_uint f128:$src)), (i64 (MFVRD (XSCVQPUDZ $src)))>;
-def : Pat<(i32 (fp_to_sint f128:$src)),
+def : Pat<(i64 (any_fp_to_sint f128:$src)), (i64 (MFVRD (XSCVQPSDZ $src)))>;
+def : Pat<(i64 (any_fp_to_uint f128:$src)), (i64 (MFVRD (XSCVQPUDZ $src)))>;
+def : Pat<(i32 (any_fp_to_sint f128:$src)),
           (i32 (MFVSRWZ (COPY_TO_REGCLASS (XSCVQPSWZ $src), VFRC)))>;
-def : Pat<(i32 (fp_to_uint f128:$src)),
+def : Pat<(i32 (any_fp_to_uint f128:$src)),
           (i32 (MFVSRWZ (COPY_TO_REGCLASS (XSCVQPUWZ $src), VFRC)))>;
 
 // Instructions for store(fptosi).

diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-conv-f128.ll b/llvm/test/CodeGen/PowerPC/fp-strict-conv-f128.ll
new file mode 100644
index 000000000000..4a4f5c00fd6c
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-conv-f128.ll
@@ -0,0 +1,602 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   < %s -mtriple=powerpc64-unknown-linux -mcpu=pwr8 | FileCheck %s\
+; RUN:   -check-prefix=P8
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 | FileCheck %s \
+; RUN:   -check-prefix=P9
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx \
+; RUN:   | FileCheck %s -check-prefix=NOVSX
+; RUN: llc -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 < %s -simplify-mir \
+; RUN:   -stop-after=machine-cp | FileCheck %s -check-prefix=MIR
+
+declare i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128, metadata)
+declare i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
+
+declare i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128, metadata)
+declare i64 @llvm.experimental.constrained.fptosi.i64.ppcf128(ppc_fp128, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.ppcf128(ppc_fp128, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(ppc_fp128, metadata)
+
+declare i128 @llvm.experimental.constrained.fptosi.i128.ppcf128(ppc_fp128, metadata)
+declare i128 @llvm.experimental.constrained.fptoui.i128.ppcf128(ppc_fp128, metadata)
+declare i128 @llvm.experimental.constrained.fptosi.i128.f128(fp128, metadata)
+declare i128 @llvm.experimental.constrained.fptoui.i128.f128(fp128, metadata)
+
+define i128 @q_to_i128(fp128 %m) #0 {
+; P8-LABEL: q_to_i128:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixtfti
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: q_to_i128:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    mflr r0
+; P9-NEXT:    std r0, 16(r1)
+; P9-NEXT:    stdu r1, -32(r1)
+; P9-NEXT:    .cfi_def_cfa_offset 32
+; P9-NEXT:    .cfi_offset lr, 16
+; P9-NEXT:    bl __fixtfti
+; P9-NEXT:    nop
+; P9-NEXT:    addi r1, r1, 32
+; P9-NEXT:    ld r0, 16(r1)
+; P9-NEXT:    mtlr r0
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: q_to_i128:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixtfti
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+entry:
+  %conv = tail call i128 @llvm.experimental.constrained.fptosi.i128.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  ret i128 %conv
+}
+
+define i128 @q_to_u128(fp128 %m) #0 {
+; P8-LABEL: q_to_u128:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixunstfti
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: q_to_u128:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    mflr r0
+; P9-NEXT:    std r0, 16(r1)
+; P9-NEXT:    stdu r1, -32(r1)
+; P9-NEXT:    .cfi_def_cfa_offset 32
+; P9-NEXT:    .cfi_offset lr, 16
+; P9-NEXT:    bl __fixunstfti
+; P9-NEXT:    nop
+; P9-NEXT:    addi r1, r1, 32
+; P9-NEXT:    ld r0, 16(r1)
+; P9-NEXT:    mtlr r0
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: q_to_u128:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixunstfti
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+entry:
+  %conv = tail call i128 @llvm.experimental.constrained.fptoui.i128.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  ret i128 %conv
+}
+
+define i128 @ppcq_to_i128(ppc_fp128 %m) #0 {
+; P8-LABEL: ppcq_to_i128:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixtfti
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: ppcq_to_i128:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    mflr r0
+; P9-NEXT:    std r0, 16(r1)
+; P9-NEXT:    stdu r1, -32(r1)
+; P9-NEXT:    .cfi_def_cfa_offset 32
+; P9-NEXT:    .cfi_offset lr, 16
+; P9-NEXT:    bl __fixtfti
+; P9-NEXT:    nop
+; P9-NEXT:    addi r1, r1, 32
+; P9-NEXT:    ld r0, 16(r1)
+; P9-NEXT:    mtlr r0
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: ppcq_to_i128:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixtfti
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+entry:
+  %conv = tail call i128 @llvm.experimental.constrained.fptosi.i128.ppcf128(ppc_fp128 %m, metadata !"fpexcept.strict") #0
+  ret i128 %conv
+}
+
+define i128 @ppcq_to_u128(ppc_fp128 %m) #0 {
+; P8-LABEL: ppcq_to_u128:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixtfti
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: ppcq_to_u128:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    mflr r0
+; P9-NEXT:    std r0, 16(r1)
+; P9-NEXT:    stdu r1, -32(r1)
+; P9-NEXT:    .cfi_def_cfa_offset 32
+; P9-NEXT:    .cfi_offset lr, 16
+; P9-NEXT:    bl __fixtfti
+; P9-NEXT:    nop
+; P9-NEXT:    addi r1, r1, 32
+; P9-NEXT:    ld r0, 16(r1)
+; P9-NEXT:    mtlr r0
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: ppcq_to_u128:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixtfti
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+entry:
+  %conv = tail call i128 @llvm.experimental.constrained.fptosi.i128.ppcf128(ppc_fp128 %m, metadata !"fpexcept.strict") #0
+  ret i128 %conv
+}
+
+define signext i32 @q_to_i32(fp128 %m) #0 {
+; P8-LABEL: q_to_i32:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixkfsi
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: q_to_i32:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    xscvqpswz v2, v2
+; P9-NEXT:    mfvsrwz r3, v2
+; P9-NEXT:    extsw r3, r3
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: q_to_i32:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixkfsi
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+;
+; MIR-LABEL: name: q_to_i32
+; MIR: renamable $v{{[0-9]+}} = XSCVQPSWZ
+; MIR-NEXT: renamable $r{{[0-9]+}} = MFVSRWZ
+entry:
+  %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define i64 @q_to_i64(fp128 %m) #0 {
+; P8-LABEL: q_to_i64:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixkfdi
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: q_to_i64:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    xscvqpsdz v2, v2
+; P9-NEXT:    mfvsrd r3, v2
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: q_to_i64:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixkfdi
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+;
+; MIR-LABEL: name: q_to_i64
+; MIR: renamable $v{{[0-9]+}} = XSCVQPSDZ
+; MIR-NEXT: renamable $x{{[0-9]+}} = MFVRD
+entry:
+  %conv = tail call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define i64 @q_to_u64(fp128 %m) #0 {
+; P8-LABEL: q_to_u64:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixunskfdi
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: q_to_u64:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    xscvqpudz v2, v2
+; P9-NEXT:    mfvsrd r3, v2
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: q_to_u64:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixunskfdi
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+;
+; MIR-LABEL: name: q_to_u64
+; MIR: renamable $v{{[0-9]+}} = XSCVQPUDZ
+; MIR-NEXT: renamable $x{{[0-9]+}} = MFVRD
+entry:
+  %conv = tail call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define zeroext i32 @q_to_u32(fp128 %m) #0 {
+; P8-LABEL: q_to_u32:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixunskfsi
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: q_to_u32:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    xscvqpuwz v2, v2
+; P9-NEXT:    mfvsrwz r3, v2
+; P9-NEXT:    clrldi r3, r3, 32
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: q_to_u32:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixunskfsi
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+;
+; MIR-LABEL: name: q_to_u32
+; MIR: renamable $v{{[0-9]+}} = XSCVQPUWZ
+; MIR-NEXT: renamable $r{{[0-9]+}} = MFVSRWZ
+entry:
+  %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define signext i32 @ppcq_to_i32(ppc_fp128 %m) #0 {
+; P8-LABEL: ppcq_to_i32:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __gcc_qtou
+; P8-NEXT:    nop
+; P8-NEXT:    extsw r3, r3
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: ppcq_to_i32:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    mflr r0
+; P9-NEXT:    std r0, 16(r1)
+; P9-NEXT:    stdu r1, -32(r1)
+; P9-NEXT:    .cfi_def_cfa_offset 32
+; P9-NEXT:    .cfi_offset lr, 16
+; P9-NEXT:    bl __gcc_qtou
+; P9-NEXT:    nop
+; P9-NEXT:    extsw r3, r3
+; P9-NEXT:    addi r1, r1, 32
+; P9-NEXT:    ld r0, 16(r1)
+; P9-NEXT:    mtlr r0
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: ppcq_to_i32:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __gcc_qtou
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    extsw r3, r3
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+entry:
+  %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128 %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define i64 @ppcq_to_i64(ppc_fp128 %m) #0 {
+; P8-LABEL: ppcq_to_i64:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixtfdi
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: ppcq_to_i64:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    mflr r0
+; P9-NEXT:    std r0, 16(r1)
+; P9-NEXT:    stdu r1, -32(r1)
+; P9-NEXT:    .cfi_def_cfa_offset 32
+; P9-NEXT:    .cfi_offset lr, 16
+; P9-NEXT:    bl __fixtfdi
+; P9-NEXT:    nop
+; P9-NEXT:    addi r1, r1, 32
+; P9-NEXT:    ld r0, 16(r1)
+; P9-NEXT:    mtlr r0
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: ppcq_to_i64:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixtfdi
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+entry:
+  %conv = tail call i64 @llvm.experimental.constrained.fptosi.i64.ppcf128(ppc_fp128 %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define i64 @ppcq_to_u64(ppc_fp128 %m) #0 {
+; P8-LABEL: ppcq_to_u64:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixunstfdi
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: ppcq_to_u64:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    mflr r0
+; P9-NEXT:    std r0, 16(r1)
+; P9-NEXT:    stdu r1, -32(r1)
+; P9-NEXT:    .cfi_def_cfa_offset 32
+; P9-NEXT:    .cfi_offset lr, 16
+; P9-NEXT:    bl __fixunstfdi
+; P9-NEXT:    nop
+; P9-NEXT:    addi r1, r1, 32
+; P9-NEXT:    ld r0, 16(r1)
+; P9-NEXT:    mtlr r0
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: ppcq_to_u64:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixunstfdi
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+entry:
+  %conv = tail call i64 @llvm.experimental.constrained.fptoui.i64.ppcf128(ppc_fp128 %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define zeroext i32 @ppcq_to_u32(ppc_fp128 %m) #0 {
+; P8-LABEL: ppcq_to_u32:
+; P8:       # %bb.0: # %entry
+; P8-NEXT:    mflr r0
+; P8-NEXT:    std r0, 16(r1)
+; P8-NEXT:    stdu r1, -112(r1)
+; P8-NEXT:    .cfi_def_cfa_offset 112
+; P8-NEXT:    .cfi_offset lr, 16
+; P8-NEXT:    bl __fixunstfsi
+; P8-NEXT:    nop
+; P8-NEXT:    addi r1, r1, 112
+; P8-NEXT:    ld r0, 16(r1)
+; P8-NEXT:    mtlr r0
+; P8-NEXT:    blr
+;
+; P9-LABEL: ppcq_to_u32:
+; P9:       # %bb.0: # %entry
+; P9-NEXT:    mflr r0
+; P9-NEXT:    std r0, 16(r1)
+; P9-NEXT:    stdu r1, -32(r1)
+; P9-NEXT:    .cfi_def_cfa_offset 32
+; P9-NEXT:    .cfi_offset lr, 16
+; P9-NEXT:    bl __fixunstfsi
+; P9-NEXT:    nop
+; P9-NEXT:    addi r1, r1, 32
+; P9-NEXT:    ld r0, 16(r1)
+; P9-NEXT:    mtlr r0
+; P9-NEXT:    blr
+;
+; NOVSX-LABEL: ppcq_to_u32:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    mflr r0
+; NOVSX-NEXT:    std r0, 16(r1)
+; NOVSX-NEXT:    stdu r1, -32(r1)
+; NOVSX-NEXT:    .cfi_def_cfa_offset 32
+; NOVSX-NEXT:    .cfi_offset lr, 16
+; NOVSX-NEXT:    bl __fixunstfsi
+; NOVSX-NEXT:    nop
+; NOVSX-NEXT:    addi r1, r1, 32
+; NOVSX-NEXT:    ld r0, 16(r1)
+; NOVSX-NEXT:    mtlr r0
+; NOVSX-NEXT:    blr
+entry:
+  %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(ppc_fp128 %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define void @fptoint_nofpexcept(fp128 %m, i32* %addr1, i64* %addr2) {
+; MIR-LABEL: name: fptoint_nofpexcept
+; MIR: renamable $v{{[0-9]+}} = nofpexcept XSCVQPSWZ
+; MIR: renamable $v{{[0-9]+}} = nofpexcept XSCVQPUWZ
+; MIR: renamable $v{{[0-9]+}} = nofpexcept XSCVQPSDZ
+; MIR: renamable $v{{[0-9]+}} = nofpexcept XSCVQPUDZ
+entry:
+  %conv1 = tail call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %m, metadata !"fpexcept.ignore") #0
+  store volatile i32 %conv1, i32* %addr1, align 4
+  %conv2 = tail call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %m, metadata !"fpexcept.ignore") #0
+  store volatile i32 %conv2, i32* %addr1, align 4
+  %conv3 = tail call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %m, metadata !"fpexcept.ignore") #0
+  store volatile i64 %conv3, i64* %addr2, align 8
+  %conv4 = tail call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %m, metadata !"fpexcept.ignore") #0
+  store volatile i64 %conv4, i64* %addr2, align 8
+  ret void
+}
+
+attributes #0 = { strictfp }

diff  --git a/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll b/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll
new file mode 100644
index 000000000000..58f979bebcc7
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-conv.ll
@@ -0,0 +1,181 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   < %s -mtriple=powerpc64-unknown-linux -mcpu=pwr8 | FileCheck %s
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 | FileCheck %s
+; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN:   < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx | \
+; RUN:   FileCheck %s -check-prefix=NOVSX
+
+declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
+
+declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
+declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
+
+declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
+
+declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
+declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
+
+define i32 @d_to_i32(double %m) #0 {
+; CHECK-LABEL: d_to_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpsxws f0, f1
+; CHECK-NEXT:    mffprwz r3, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: d_to_i32:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiwz f0, f1
+; NOVSX-NEXT:    addi r3, r1, -4
+; NOVSX-NEXT:    stfiwx f0, 0, r3
+; NOVSX-NEXT:    lwz r3, -4(r1)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define i64 @d_to_i64(double %m) #0 {
+; CHECK-LABEL: d_to_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpsxds f0, f1
+; CHECK-NEXT:    mffprd r3, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: d_to_i64:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctidz f0, f1
+; NOVSX-NEXT:    stfd f0, -8(r1)
+; NOVSX-NEXT:    ld r3, -8(r1)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define i64 @d_to_u64(double %m) #0 {
+; CHECK-LABEL: d_to_u64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpuxds f0, f1
+; CHECK-NEXT:    mffprd r3, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: d_to_u64:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiduz f0, f1
+; NOVSX-NEXT:    stfd f0, -8(r1)
+; NOVSX-NEXT:    ld r3, -8(r1)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define zeroext i32 @d_to_u32(double %m) #0 {
+; CHECK-LABEL: d_to_u32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpuxws f0, f1
+; CHECK-NEXT:    mffprwz r3, f0
+; CHECK-NEXT:    clrldi r3, r3, 32
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: d_to_u32:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiwuz f0, f1
+; NOVSX-NEXT:    addi r3, r1, -4
+; NOVSX-NEXT:    stfiwx f0, 0, r3
+; NOVSX-NEXT:    lwz r3, -4(r1)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define signext i32 @f_to_i32(float %m) #0 {
+; CHECK-LABEL: f_to_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpsxws f0, f1
+; CHECK-NEXT:    mffprwz r3, f0
+; CHECK-NEXT:    extsw r3, r3
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: f_to_i32:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiwz f0, f1
+; NOVSX-NEXT:    addi r3, r1, -4
+; NOVSX-NEXT:    stfiwx f0, 0, r3
+; NOVSX-NEXT:    lwa r3, -4(r1)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+define i64 @f_to_i64(float %m) #0 {
+; CHECK-LABEL: f_to_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpsxds f0, f1
+; CHECK-NEXT:    mffprd r3, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: f_to_i64:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctidz f0, f1
+; NOVSX-NEXT:    stfd f0, -8(r1)
+; NOVSX-NEXT:    ld r3, -8(r1)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define i64 @f_to_u64(float %m) #0 {
+; CHECK-LABEL: f_to_u64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpuxds f0, f1
+; CHECK-NEXT:    mffprd r3, f0
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: f_to_u64:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiduz f0, f1
+; NOVSX-NEXT:    stfd f0, -8(r1)
+; NOVSX-NEXT:    ld r3, -8(r1)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
+  ret i64 %conv
+}
+
+define zeroext i32 @f_to_u32(float %m) #0 {
+; CHECK-LABEL: f_to_u32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xscvdpuxws f0, f1
+; CHECK-NEXT:    mffprwz r3, f0
+; CHECK-NEXT:    clrldi r3, r3, 32
+; CHECK-NEXT:    blr
+;
+; NOVSX-LABEL: f_to_u32:
+; NOVSX:       # %bb.0: # %entry
+; NOVSX-NEXT:    fctiwuz f0, f1
+; NOVSX-NEXT:    addi r3, r1, -4
+; NOVSX-NEXT:    stfiwx f0, 0, r3
+; NOVSX-NEXT:    lwz r3, -4(r1)
+; NOVSX-NEXT:    blr
+entry:
+  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
+  ret i32 %conv
+}
+
+attributes #0 = { strictfp }


        


More information about the llvm-commits mailing list