[llvm] e147a0f - [LoongArch] Add codegen support for converting between unsigned integer and floating-point

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 13 00:27:31 PDT 2022


Author: gonglingqin
Date: 2022-07-13T15:25:44+08:00
New Revision: e147a0f65a52147220de6ce0e7db5bc926680728

URL: https://github.com/llvm/llvm-project/commit/e147a0f65a52147220de6ce0e7db5bc926680728
DIFF: https://github.com/llvm/llvm-project/commit/e147a0f65a52147220de6ce0e7db5bc926680728.diff

LOG: [LoongArch] Add codegen support for converting between unsigned integer and floating-point

Differential Revision: https://reviews.llvm.org/D128900
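
In IR terms, the patch covers fptoui and uitofp for float and double at
the i8 through i64 widths. A minimal illustrative sample (function names
here are made up; the updated tests below are the authoritative coverage):

define i32 @f2u(float %a) {
  %1 = fptoui float %a to i32
  ret i32 %1
}

define double @u2d(i64 %a) {
  %1 = uitofp i64 %a to double
  ret double %1
}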

Added: 
    

Modified: 
    llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
    llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchISelLowering.h
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
index a90313a2c70b..20448492a558 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -165,6 +165,7 @@ def : PatFPSetcc<SETULT, FCMP_CULT_S, FPR32>;
 def : PatFPSetcc<SETULE, FCMP_CULE_S, FPR32>;
 def : PatFPSetcc<SETUNE, FCMP_CUNE_S, FPR32>;
 def : PatFPSetcc<SETUO,  FCMP_CUN_S,  FPR32>;
+def : PatFPSetcc<SETLT,  FCMP_CLT_S,  FPR32>;
 
 // TODO: Match signaling comparison strict_fsetccs with FCMP_S*_S instructions.
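
The new SETLT pattern backs the FP_TO_UINT expansion introduced below:
TargetLowering::expandFP_TO_UINT splits the conversion into an in-range
and an out-of-range path via a setcc with the ISD::SETLT condition code,
which previously had no FCMP_CLT_S pattern to select. A hedged sketch of
a conversion that exercises it, the same shape as convert_float_to_u32
in the updated tests (where the resulting fcmp.clt.s is visible):

define i32 @sketch_f2u(float %a) {
  %1 = fptoui float %a to i32
  ret i32 %1
}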
 

diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
index b62e4d8c31ac..bb50cec9f4c0 100644
--- a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
@@ -169,6 +169,7 @@ def : PatFPSetcc<SETULT, FCMP_CULT_D, FPR64>;
 def : PatFPSetcc<SETULE, FCMP_CULE_D, FPR64>;
 def : PatFPSetcc<SETUNE, FCMP_CUNE_D, FPR64>;
 def : PatFPSetcc<SETUO,  FCMP_CUN_D,  FPR64>;
+def : PatFPSetcc<SETLT,  FCMP_CLT_D,  FPR64>;
 
 // TODO: Match signaling comparison strict_fsetccs with FCMP_S*_D instructions.
 
@@ -222,6 +223,9 @@ def : Pat<(f64 (sint_to_fp (i64 (sexti32 (i64 GPR:$src))))),
           (FFINT_D_W (MOVGR2FR_W GPR:$src))>;
 def : Pat<(f64 (sint_to_fp GPR:$src)), (FFINT_D_L (MOVGR2FR_D GPR:$src))>;
 
+def : Pat<(f64 (uint_to_fp (i64 (zexti32 (i64 GPR:$src))))),
+          (FFINT_D_W (MOVGR2FR_W GPR:$src))>;
+
 def : Pat<(bitconvert GPR:$src), (MOVGR2FR_D GPR:$src)>;
 
 // Convert FP to int

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
index a9df0c9bce6c..bb40ff817574 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
@@ -161,6 +161,24 @@ bool LoongArchDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
   return false;
 }
 
+bool LoongArchDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
+  if (N.getOpcode() == ISD::AND) {
+    auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
+    if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
+      Val = N.getOperand(0);
+      return true;
+    }
+  }
+  MVT VT = N.getSimpleValueType();
+  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
+  if (CurDAG->MaskedValueIsZero(N, Mask)) {
+    Val = N;
+    return true;
+  }
+
+  return false;
+}
+
 // This pass converts a legalized DAG into a LoongArch-specific DAG, ready
 // for instruction scheduling.
 FunctionPass *llvm::createLoongArchISelDag(LoongArchTargetMachine &TM) {
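
selectZExti32 accepts two shapes: an explicit AND with the 32-bit
all-ones mask, and any value whose upper 32 bits MaskedValueIsZero can
prove are clear, such as the AssertZext wrapping a zeroext argument. A
hedged sketch of the second form, which is what lets the zexti32-based
FFINT_D_W pattern added above collapse the conversion to a single
ffint.d.w (compare convert_u16_to_double in the updated tests):

define double @assertzext_u2d(i16 zeroext %a) {
  %1 = uitofp i16 %a to double
  ret double %1
}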

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
index 3392ec7fe929..7ad329a64424 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
@@ -49,6 +49,7 @@ class LoongArchDAGToDAGISel : public SelectionDAGISel {
   }
 
   bool selectSExti32(SDValue N, SDValue &Val);
+  bool selectZExti32(SDValue N, SDValue &Val);
 
 // Include the pieces autogenerated from the target description.
 #include "LoongArchGenDAGISel.inc"

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index beeb072d2172..93207eb78e4c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -60,6 +60,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::SRL, MVT::i32, Custom);
     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
+    if (Subtarget.hasBasicF() && !Subtarget.hasBasicD())
+      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
   }
 
   static const ISD::CondCode FPCCToExpand[] = {ISD::SETOGT, ISD::SETOGE,
@@ -80,10 +82,12 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::SELECT_CC, GRLenVT, Expand);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
   setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, GRLenVT, Expand);
-
   if (!Subtarget.is64Bit())
     setLibcallName(RTLIB::MUL_I128, nullptr);
 
+  setOperationAction(ISD::FP_TO_UINT, GRLenVT, Custom);
+  setOperationAction(ISD::UINT_TO_FP, GRLenVT, Custom);
+
   // Compute derived properties from the register classes.
   computeRegisterProperties(STI.getRegisterInfo());
 
@@ -125,9 +129,30 @@ SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
     return lowerFP_TO_SINT(Op, DAG);
   case ISD::BITCAST:
     return lowerBITCAST(Op, DAG);
+  case ISD::FP_TO_UINT:
+    return SDValue();
+  case ISD::UINT_TO_FP:
+    return lowerUINT_TO_FP(Op, DAG);
   }
 }
 
+SDValue LoongArchTargetLowering::lowerUINT_TO_FP(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+
+  SDLoc DL(Op);
+  auto &TLI = DAG.getTargetLoweringInfo();
+  SDValue Tmp1, Tmp2;
+  SDValue Op1 = Op.getOperand(0);
+  if (Op1->getOpcode() == ISD::AssertZext ||
+      Op1->getOpcode() == ISD::AssertSext)
+    return Op;
+  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op.getOperand(0));
+  SDValue Res = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f64, Trunc);
+  SDNode *N = Res.getNode();
+  TLI.expandUINT_TO_FP(N, Tmp1, Tmp2, DAG);
+  return Tmp1;
+}
+
 SDValue LoongArchTargetLowering::lowerBITCAST(SDValue Op,
                                               SelectionDAG &DAG) const {
 
@@ -359,6 +384,15 @@ void LoongArchTargetLowering::ReplaceNodeResults(
     }
     break;
   }
+  case ISD::FP_TO_UINT: {
+    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+           "Unexpected custom legalisation");
+    auto &TLI = DAG.getTargetLoweringInfo();
+    SDValue Tmp1, Tmp2;
+    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Tmp1));
+    break;
+  }
   }
 }
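
Two notes on the hooks above. lowerUINT_TO_FP deliberately returns the
node unchanged when its operand is an AssertZext/AssertSext, leaving it
for the zexti32 pattern to select; otherwise it truncates the operand to
i32 and hands the rebuilt node to expandUINT_TO_FP. For FP_TO_UINT, the
illegal i32 result (LA64 with basic F but not basic D) is expanded in
ReplaceNodeResults and truncated back to i32. A hedged sketch of a
conversion that takes the expandUINT_TO_FP path, matching the
magic-constant sequence in convert_u32_to_double's LA64 checks:

define double @u32_to_f64(i32 %a) {
  %1 = uitofp i32 %a to double
  ret double %1
}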
 

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 186348460521..03afd1cb55f8 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -105,6 +105,7 @@ class LoongArchTargetLowering : public TargetLowering {
   SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
 
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;

diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index fd1fa13147e1..fa0b20ea1172 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -591,6 +591,7 @@ def shiftMaskGRLen
 def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>;
 
 def sexti32 : ComplexPattern<i64, 1, "selectSExti32">;
+def zexti32 : ComplexPattern<i64, 1, "selectZExti32">;
 
 class shiftop<SDPatternOperator operator>
     : PatFrag<(ops node:$val, node:$count),

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
index 0f8abab2b1be..968a701660c0 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
@@ -113,6 +113,35 @@ define i32 @convert_double_to_i32(double %a) nounwind {
   ret i32 %1
 }
 
+define i32 @convert_double_to_u32(double %a) nounwind {
+; LA32-LABEL: convert_double_to_u32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    pcalau12i $a0, .LCPI7_0
+; LA32-NEXT:    addi.w $a0, $a0, .LCPI7_0
+; LA32-NEXT:    fld.d $fa1, $a0, 0
+; LA32-NEXT:    fsub.d $fa2, $fa0, $fa1
+; LA32-NEXT:    ftintrz.w.d $fa2, $fa2
+; LA32-NEXT:    movfr2gr.s $a0, $fa2
+; LA32-NEXT:    lu12i.w $a1, -524288
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a1, $fcc0
+; LA32-NEXT:    masknez $a0, $a0, $a1
+; LA32-NEXT:    ftintrz.w.d $fa0, $fa0
+; LA32-NEXT:    movfr2gr.s $a2, $fa0
+; LA32-NEXT:    maskeqz $a1, $a2, $a1
+; LA32-NEXT:    or $a0, $a1, $a0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_double_to_u32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ftintrz.l.d $fa0, $fa0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui double %a to i32
+  ret i32 %1
+}
+
 define i64 @convert_double_to_i64(double %a) nounwind {
 ; LA32-LABEL: convert_double_to_i64:
 ; LA32:       # %bb.0:
@@ -132,27 +161,136 @@ define i64 @convert_double_to_i64(double %a) nounwind {
   ret i64 %1
 }
 
-define i64 @bitcast_double_to_i64(double %a) nounwind {
-; LA32-LABEL: bitcast_double_to_i64:
+define i64 @convert_double_to_u64(double %a) nounwind {
+; LA32-LABEL: convert_double_to_u64:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    fst.d $fa0, $sp, 8
-; LA32-NEXT:    addi.w $a0, $sp, 8
-; LA32-NEXT:    ori $a0, $a0, 4
-; LA32-NEXT:    ld.w $a1, $a0, 0
-; LA32-NEXT:    ld.w $a0, $sp, 8
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl __fixunsdfdi
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    jirl $zero, $ra, 0
 ;
-; LA64-LABEL: bitcast_double_to_i64:
+; LA64-LABEL: convert_double_to_u64:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    pcalau12i $a0, .LCPI9_0
+; LA64-NEXT:    addi.d $a0, $a0, .LCPI9_0
+; LA64-NEXT:    fld.d $fa1, $a0, 0
+; LA64-NEXT:    fsub.d $fa2, $fa0, $fa1
+; LA64-NEXT:    ftintrz.l.d $fa2, $fa2
+; LA64-NEXT:    movfr2gr.d $a0, $fa2
+; LA64-NEXT:    lu52i.d $a1, $zero, -2048
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a1, $fcc0
+; LA64-NEXT:    masknez $a0, $a0, $a1
+; LA64-NEXT:    ftintrz.l.d $fa0, $fa0
+; LA64-NEXT:    movfr2gr.d $a2, $fa0
+; LA64-NEXT:    maskeqz $a1, $a2, $a1
+; LA64-NEXT:    or $a0, $a1, $a0
 ; LA64-NEXT:    jirl $zero, $ra, 0
-  %1 = bitcast double %a to i64
+  %1 = fptoui double %a to i64
   ret i64 %1
 }
 
-define double @bitcast_i64_to_double(i64 %a) nounwind {
+define double @convert_u8_to_double(i8 zeroext %a) nounwind {
+; LA32-LABEL: convert_u8_to_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    ffint.d.w $fa0, $fa0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_u8_to_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ffint.d.w $fa0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i8 %a to double
+  ret double %1
+}
+
+define double @convert_u16_to_double(i16 zeroext %a) nounwind {
+; LA32-LABEL: convert_u16_to_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    ffint.d.w $fa0, $fa0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_u16_to_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ffint.d.w $fa0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i16 %a to double
+  ret double %1
+}
+
+define double @convert_u32_to_double(i32 %a) nounwind {
+; LA32-LABEL: convert_u32_to_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    addi.w $a1, $sp, 8
+; LA32-NEXT:    ori $a1, $a1, 4
+; LA32-NEXT:    lu12i.w $a2, 275200
+; LA32-NEXT:    st.w $a2, $a1, 0
+; LA32-NEXT:    st.w $a0, $sp, 8
+; LA32-NEXT:    pcalau12i $a0, .LCPI12_0
+; LA32-NEXT:    addi.w $a0, $a0, .LCPI12_0
+; LA32-NEXT:    fld.d $fa0, $a0, 0
+; LA32-NEXT:    fld.d $fa1, $sp, 8
+; LA32-NEXT:    fsub.d $fa0, $fa1, $fa0
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_u32_to_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu52i.d $a1, $zero, 1107
+; LA64-NEXT:    movgr2fr.d $fa0, $a1
+; LA64-NEXT:    pcalau12i $a1, .LCPI12_0
+; LA64-NEXT:    addi.d $a1, $a1, .LCPI12_0
+; LA64-NEXT:    fld.d $fa1, $a1, 0
+; LA64-NEXT:    fsub.d $fa0, $fa0, $fa1
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    lu52i.d $a1, $zero, 1075
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.d $fa1, $a0
+; LA64-NEXT:    fadd.d $fa0, $fa1, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i32 %a to double
+  ret double %1
+}
+
+define double @convert_u64_to_double(i64 %a) nounwind {
+; LA32-LABEL: convert_u64_to_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl __floatundidf
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_u64_to_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    srli.d $a1, $a0, 32
+; LA64-NEXT:    lu52i.d $a2, $zero, 1107
+; LA64-NEXT:    or $a1, $a1, $a2
+; LA64-NEXT:    movgr2fr.d $fa0, $a1
+; LA64-NEXT:    pcalau12i $a1, .LCPI13_0
+; LA64-NEXT:    addi.d $a1, $a1, .LCPI13_0
+; LA64-NEXT:    fld.d $fa1, $a1, 0
+; LA64-NEXT:    fsub.d $fa0, $fa0, $fa1
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    lu52i.d $a1, $zero, 1075
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.d $fa1, $a0
+; LA64-NEXT:    fadd.d $fa0, $fa1, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i64 %a to double
+  ret double %1
+}
+
+define double @bitcast_i64_to_double(i64 %a, i64 %b) nounwind {
 ; LA32-LABEL: bitcast_i64_to_double:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
@@ -171,3 +309,23 @@ define double @bitcast_i64_to_double(i64 %a) nounwind {
   %1 = bitcast i64 %a to double
   ret double %1
 }
+
+define i64 @bitcast_double_to_i64(double %a) nounwind {
+; LA32-LABEL: bitcast_double_to_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    fst.d $fa0, $sp, 8
+; LA32-NEXT:    addi.w $a0, $sp, 8
+; LA32-NEXT:    ori $a0, $a0, 4
+; LA32-NEXT:    ld.w $a1, $a0, 0
+; LA32-NEXT:    ld.w $a0, $sp, 8
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: bitcast_double_to_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = bitcast double %a to i64
+  ret i64 %1
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
index 32a0c369cc40..30e0045a1467 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
@@ -1,166 +1,650 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
-; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32F
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32D
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64F
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64D
 
 define signext i8 @convert_float_to_i8(float %a) nounwind {
-; LA32-LABEL: convert_float_to_i8:
-; LA32:       # %bb.0:
-; LA32-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA32-NEXT:    movfr2gr.s $a0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_float_to_i8:
-; LA64:       # %bb.0:
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_float_to_i8:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_i8:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_i8:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_i8:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = fptosi float %a to i8
   ret i8 %1
 }
 
 define signext i16 @convert_float_to_i16(float %a) nounwind {
-; LA32-LABEL: convert_float_to_i16:
-; LA32:       # %bb.0:
-; LA32-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA32-NEXT:    movfr2gr.s $a0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_float_to_i16:
-; LA64:       # %bb.0:
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_float_to_i16:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_i16:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_i16:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_i16:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = fptosi float %a to i16
   ret i16 %1
 }
 
 define i32 @convert_float_to_i32(float %a) nounwind {
-; LA32-LABEL: convert_float_to_i32:
-; LA32:       # %bb.0:
-; LA32-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA32-NEXT:    movfr2gr.s $a0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_float_to_i32:
-; LA64:       # %bb.0:
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_float_to_i32:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_i32:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_i32:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_i32:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = fptosi float %a to i32
   ret i32 %1
 }
 
 define i64 @convert_float_to_i64(float %a) nounwind {
-; LA32-LABEL: convert_float_to_i64:
-; LA32:       # %bb.0:
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl __fixsfdi
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_float_to_i64:
-; LA64:       # %bb.0:
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_float_to_i64:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl __fixsfdi
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_i64:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl __fixsfdi
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_i64:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_i64:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = fptosi float %a to i64
   ret i64 %1
 }
 
+define zeroext i8 @convert_float_to_u8(float %a) nounwind {
+; LA32F-LABEL: convert_float_to_u8:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_u8:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_u8:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_u8:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui float %a to i8
+  ret i8 %1
+}
+
+define zeroext i16 @convert_float_to_u16(float %a) nounwind {
+; LA32F-LABEL: convert_float_to_u16:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_u16:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_u16:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_u16:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui float %a to i16
+  ret i16 %1
+}
+
+define i32 @convert_float_to_u32(float %a) nounwind {
+; LA32F-LABEL: convert_float_to_u32:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    pcalau12i $a0, .LCPI6_0
+; LA32F-NEXT:    addi.w $a0, $a0, .LCPI6_0
+; LA32F-NEXT:    fld.s $fa1, $a0, 0
+; LA32F-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA32F-NEXT:    ftintrz.w.s $fa2, $fa2
+; LA32F-NEXT:    movfr2gr.s $a0, $fa2
+; LA32F-NEXT:    lu12i.w $a1, -524288
+; LA32F-NEXT:    xor $a0, $a0, $a1
+; LA32F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA32F-NEXT:    movcf2gr $a1, $fcc0
+; LA32F-NEXT:    masknez $a0, $a0, $a1
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a2, $fa0
+; LA32F-NEXT:    maskeqz $a1, $a2, $a1
+; LA32F-NEXT:    or $a0, $a1, $a0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_u32:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    pcalau12i $a0, .LCPI6_0
+; LA32D-NEXT:    addi.w $a0, $a0, .LCPI6_0
+; LA32D-NEXT:    fld.s $fa1, $a0, 0
+; LA32D-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA32D-NEXT:    ftintrz.w.s $fa2, $fa2
+; LA32D-NEXT:    movfr2gr.s $a0, $fa2
+; LA32D-NEXT:    lu12i.w $a1, -524288
+; LA32D-NEXT:    xor $a0, $a0, $a1
+; LA32D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA32D-NEXT:    movcf2gr $a1, $fcc0
+; LA32D-NEXT:    masknez $a0, $a0, $a1
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a2, $fa0
+; LA32D-NEXT:    maskeqz $a1, $a2, $a1
+; LA32D-NEXT:    or $a0, $a1, $a0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_u32:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    pcalau12i $a0, .LCPI6_0
+; LA64F-NEXT:    addi.d $a0, $a0, .LCPI6_0
+; LA64F-NEXT:    fld.s $fa1, $a0, 0
+; LA64F-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    ftintrz.w.s $fa2, $fa2
+; LA64F-NEXT:    movfr2gr.s $a0, $fa2
+; LA64F-NEXT:    lu12i.w $a1, -524288
+; LA64F-NEXT:    xor $a0, $a0, $a1
+; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64F-NEXT:    movcf2gr $a1, $fcc0
+; LA64F-NEXT:    masknez $a0, $a0, $a1
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:    maskeqz $a1, $a2, $a1
+; LA64F-NEXT:    or $a0, $a1, $a0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_u32:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui float %a to i32
+  ret i32 %1
+}
+
+define i64 @convert_float_to_u64(float %a) nounwind {
+; LA32F-LABEL: convert_float_to_u64:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl __fixunssfdi
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_u64:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl __fixunssfdi
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_u64:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    pcalau12i $a0, .LCPI7_0
+; LA64F-NEXT:    addi.d $a0, $a0, .LCPI7_0
+; LA64F-NEXT:    fld.s $fa1, $a0, 0
+; LA64F-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    ftintrz.w.s $fa2, $fa2
+; LA64F-NEXT:    movfr2gr.s $a0, $fa2
+; LA64F-NEXT:    lu52i.d $a1, $zero, -2048
+; LA64F-NEXT:    xor $a0, $a0, $a1
+; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64F-NEXT:    movcf2gr $a1, $fcc0
+; LA64F-NEXT:    masknez $a0, $a0, $a1
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:    maskeqz $a1, $a2, $a1
+; LA64F-NEXT:    or $a0, $a1, $a0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_u64:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    pcalau12i $a0, .LCPI7_0
+; LA64D-NEXT:    addi.d $a0, $a0, .LCPI7_0
+; LA64D-NEXT:    fld.s $fa1, $a0, 0
+; LA64D-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    ftintrz.l.s $fa2, $fa2
+; LA64D-NEXT:    movfr2gr.d $a0, $fa2
+; LA64D-NEXT:    lu52i.d $a1, $zero, -2048
+; LA64D-NEXT:    xor $a0, $a0, $a1
+; LA64D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64D-NEXT:    movcf2gr $a1, $fcc0
+; LA64D-NEXT:    masknez $a0, $a0, $a1
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a2, $fa0
+; LA64D-NEXT:    maskeqz $a1, $a2, $a1
+; LA64D-NEXT:    or $a0, $a1, $a0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui float %a to i64
+  ret i64 %1
+}
+
 define float @convert_i8_to_float(i8 signext %a) nounwind {
-; LA32-LABEL: convert_i8_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movgr2fr.w $fa0, $a0
-; LA32-NEXT:    ffint.s.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_i8_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    ffint.s.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_i8_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_i8_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_i8_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_i8_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = sitofp i8 %a to float
   ret float %1
 }
 
 define float @convert_i16_to_float(i16 signext %a) nounwind {
-; LA32-LABEL: convert_i16_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movgr2fr.w $fa0, $a0
-; LA32-NEXT:    ffint.s.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_i16_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    ffint.s.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_i16_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_i16_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_i16_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_i16_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = sitofp i16 %a to float
   ret float %1
 }
 
 define float @convert_i32_to_float(i32 %a) nounwind {
-; LA32-LABEL: convert_i32_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movgr2fr.w $fa0, $a0
-; LA32-NEXT:    ffint.s.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_i32_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    addi.w $a0, $a0, 0
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    ffint.s.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_i32_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_i32_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_i32_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.w $a0, $a0, 0
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_i32_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.w $a0, $a0, 0
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = sitofp i32 %a to float
   ret float %1
 }
 
 define float @convert_i64_to_float(i64 %a) nounwind {
-; LA32-LABEL: convert_i64_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl __floatdisf
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_i64_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    ffint.s.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_i64_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl __floatdisf
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_i64_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl __floatdisf
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_i64_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_i64_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = sitofp i64 %a to float
   ret float %1
 }
 
+define float @convert_u8_to_float(i8 zeroext %a) nounwind {
+; LA32F-LABEL: convert_u8_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_u8_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_u8_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_u8_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i8 %a to float
+  ret float %1
+}
+
+define float @convert_u16_to_float(i16 zeroext %a) nounwind {
+; LA32F-LABEL: convert_u16_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_u16_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_u16_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_u16_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i16 %a to float
+  ret float %1
+}
+
+define float @convert_u32_to_float(i32 %a) nounwind {
+; LA32F-LABEL: convert_u32_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    srli.w $a1, $a0, 1
+; LA32F-NEXT:    andi $a2, $a0, 1
+; LA32F-NEXT:    or $a1, $a2, $a1
+; LA32F-NEXT:    movgr2fr.w $fa0, $a1
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA32F-NEXT:    slti $a1, $a0, 0
+; LA32F-NEXT:    movgr2cf $fcc0, $a1
+; LA32F-NEXT:    movgr2fr.w $fa1, $a0
+; LA32F-NEXT:    ffint.s.w $fa1, $fa1
+; LA32F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_u32_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    addi.w $a1, $sp, 8
+; LA32D-NEXT:    ori $a1, $a1, 4
+; LA32D-NEXT:    lu12i.w $a2, 275200
+; LA32D-NEXT:    st.w $a2, $a1, 0
+; LA32D-NEXT:    st.w $a0, $sp, 8
+; LA32D-NEXT:    pcalau12i $a0, .LCPI14_0
+; LA32D-NEXT:    addi.w $a0, $a0, .LCPI14_0
+; LA32D-NEXT:    fld.d $fa0, $a0, 0
+; LA32D-NEXT:    fld.d $fa1, $sp, 8
+; LA32D-NEXT:    fsub.d $fa0, $fa1, $fa0
+; LA32D-NEXT:    fcvt.s.d $fa0, $fa0
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_u32_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    bstrpick.d $a1, $a0, 31, 1
+; LA64F-NEXT:    andi $a2, $a0, 1
+; LA64F-NEXT:    or $a1, $a2, $a1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a1
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA64F-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64F-NEXT:    slti $a1, $a0, 0
+; LA64F-NEXT:    movgr2cf $fcc0, $a1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_u32_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    bstrpick.d $a1, $a0, 31, 1
+; LA64D-NEXT:    andi $a2, $a0, 1
+; LA64D-NEXT:    or $a1, $a2, $a1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA64D-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64D-NEXT:    slti $a1, $a0, 0
+; LA64D-NEXT:    movgr2cf $fcc0, $a1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i32 %a to float
+  ret float %1
+}
+
+define float @convert_u64_to_float(i64 %a) nounwind {
+; LA32F-LABEL: convert_u64_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl __floatundisf
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_u64_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl __floatundisf
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_u64_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    srli.d $a1, $a0, 1
+; LA64F-NEXT:    andi $a2, $a0, 1
+; LA64F-NEXT:    or $a1, $a2, $a1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a1
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA64F-NEXT:    slti $a1, $a0, 0
+; LA64F-NEXT:    movgr2cf $fcc0, $a1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_u64_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    srli.d $a1, $a0, 1
+; LA64D-NEXT:    andi $a2, $a0, 1
+; LA64D-NEXT:    or $a1, $a2, $a1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA64D-NEXT:    slti $a1, $a0, 0
+; LA64D-NEXT:    movgr2cf $fcc0, $a1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i64 %a to float
+  ret float %1
+}
+
 define i32 @bitcast_float_to_i32(float %a) nounwind {
-; LA32-LABEL: bitcast_float_to_i32:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movfr2gr.s $a0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: bitcast_float_to_i32:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: bitcast_float_to_i32:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: bitcast_float_to_i32:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: bitcast_float_to_i32:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: bitcast_float_to_i32:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = bitcast float %a to i32
   ret i32 %1
 }
 
 define float @bitcast_i32_to_float(i32 %a) nounwind {
-; LA32-LABEL: bitcast_i32_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movgr2fr.w $fa0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: bitcast_i32_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: bitcast_i32_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: bitcast_i32_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: bitcast_i32_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: bitcast_i32_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = bitcast i32 %a to float
   ret float %1
 }

