[llvm] 2dc0fa0 - [RISCV][CodeGen] Support Zdinx on RV64 codegen

Shao-Ce SUN via llvm-commits llvm-commits@lists.llvm.org
Wed May 3 18:00:49 PDT 2023


Author: Shao-Ce SUN
Date: 2023-05-04T09:00:40+08:00
New Revision: 2dc0fa050eff14fe5c4249fb44d42f29b41d5da4

URL: https://github.com/llvm/llvm-project/commit/2dc0fa050eff14fe5c4249fb44d42f29b41d5da4
DIFF: https://github.com/llvm/llvm-project/commit/2dc0fa050eff14fe5c4249fb44d42f29b41d5da4.diff

LOG: [RISCV][CodeGen] Support Zdinx on RV64 codegen

This patch was split from D122918. Co-Authors: @liaolucy, @realqhc

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D149665
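
With Zdinx, double-precision values are kept in the integer register file
rather than in FPRs, and on RV64 an f64 fits in a single GPR. For example,
the fadd_d case from double-arith.ll below boils down to IR like

    define double @fadd_d(double %a, double %b) nounwind {
      %1 = fadd double %a, %b
      ret double %1
    }

which, when compiled with "llc -mtriple=riscv64 -mattr=+zdinx -target-abi=lp64",
now selects to a single "fadd.d a0, a0, a1" instead of a soft-float libcall
(see the RV64IZFINXZDINX check lines added to the tests). RV32 still has
assembly-only support, since an f64 there needs a GPR pair.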

Added: 
    

Modified: 
    llvm/docs/RISCVUsage.rst
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoD.td
    llvm/lib/Target/RISCV/RISCVSubtarget.h
    llvm/test/CodeGen/RISCV/double-arith-strict.ll
    llvm/test/CodeGen/RISCV/double-arith.ll
    llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
    llvm/test/CodeGen/RISCV/double-br-fcmp.ll
    llvm/test/CodeGen/RISCV/double-convert-strict.ll
    llvm/test/CodeGen/RISCV/double-convert.ll
    llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
    llvm/test/CodeGen/RISCV/double-fcmp.ll
    llvm/test/CodeGen/RISCV/double-frem.ll
    llvm/test/CodeGen/RISCV/double-imm.ll
    llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
    llvm/test/CodeGen/RISCV/double-intrinsics.ll
    llvm/test/CodeGen/RISCV/double-isnan.ll
    llvm/test/CodeGen/RISCV/double-mem.ll
    llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
    llvm/test/CodeGen/RISCV/double-round-conv.ll
    llvm/test/CodeGen/RISCV/double-select-fcmp.ll
    llvm/test/CodeGen/RISCV/double-select-icmp.ll
    llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll

Removed: 
    


################################################################################
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index c76f45c7c381..5c4ae89085f1 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -100,7 +100,7 @@ on support follow.
      ``Zbkc``         Supported
      ``Zbkx``         Supported (`See note <#riscv-scalar-crypto-note1>`__)
      ``Zbs``          Supported
-     ``Zdinx``        Assembly Support
+     ``Zdinx``        Assembly Support for RV32. Full support for RV64.
      ``Zfh``          Supported
      ``Zfhmin``       Supported
      ``Zfinx``        Supported

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 08c2f741d06d..b77c8e2c7a31 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -890,7 +890,11 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       // For RV32, we can't move from a GPR, we need to convert instead. This
       // should only happen for +0.0 and -0.0.
       assert((Subtarget->is64Bit() || APF.isZero()) && "Unexpected constant");
-      Opc = Subtarget->is64Bit() ? RISCV::FMV_D_X : RISCV::FCVT_D_W;
+      bool HasZdinx = Subtarget->hasStdExtZdinx();
+      if (Subtarget->is64Bit())
+        Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X;
+      else
+        Opc = RISCV::FCVT_D_W;
       break;
     }
 

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 336cbfc93874..423c102e1f4a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -122,6 +122,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
   if (Subtarget.hasStdExtZfinx())
     addRegisterClass(MVT::f32, &RISCV::GPRF32RegClass);
+  if (Subtarget.hasStdExtZdinx()) {
+    if (Subtarget.is64Bit())
+      addRegisterClass(MVT::f64, &RISCV::GPRF64RegClass);
+  }
 
   static const MVT::SimpleValueType BoolVecVTs[] = {
       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
@@ -429,7 +433,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   if (Subtarget.hasStdExtFOrZfinx() && Subtarget.is64Bit())
     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
 
-  if (Subtarget.hasStdExtD()) {
+  if (Subtarget.hasStdExtDOrZdinx()) {
     setOperationAction(FPLegalNodeTypes, MVT::f64, Legal);
 
     if (Subtarget.hasStdExtZfa()) {
@@ -1075,7 +1079,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
       if (Subtarget.hasStdExtFOrZfinx())
         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
-      if (Subtarget.hasStdExtD())
+      if (Subtarget.hasStdExtDOrZdinx())
         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
     }
   }
@@ -1799,7 +1803,7 @@ bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
   else if (VT == MVT::f32)
     IsLegalVT = Subtarget.hasStdExtFOrZfinx();
   else if (VT == MVT::f64)
-    IsLegalVT = Subtarget.hasStdExtD();
+    IsLegalVT = Subtarget.hasStdExtDOrZdinx();
 
   if (!IsLegalVT)
     return false;
@@ -12670,6 +12674,7 @@ static bool isSelectPseudo(MachineInstr &MI) {
   case RISCV::Select_FPR32_Using_CC_GPR:
   case RISCV::Select_FPR32INX_Using_CC_GPR:
   case RISCV::Select_FPR64_Using_CC_GPR:
+  case RISCV::Select_FPR64INX_Using_CC_GPR:
     return true;
   }
 }
@@ -13071,6 +13076,15 @@ static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB,
     FSGNJXOpc = RISCV::FSGNJX_D;
     RC = &RISCV::FPR64RegClass;
     break;
+  case RISCV::PseudoFROUND_D_INX:
+    assert(Subtarget.is64Bit() && "Expected 64-bit GPR.");
+    CmpOpc = RISCV::FLT_D_INX;
+    F2IOpc = RISCV::FCVT_L_D_INX;
+    I2FOpc = RISCV::FCVT_D_L_INX;
+    FSGNJOpc = RISCV::FSGNJ_D_INX;
+    FSGNJXOpc = RISCV::FSGNJX_D_INX;
+    RC = &RISCV::GPRF64RegClass;
+    break;
   }
 
   const BasicBlock *BB = MBB->getBasicBlock();
@@ -13161,6 +13175,7 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case RISCV::Select_FPR32_Using_CC_GPR:
   case RISCV::Select_FPR32INX_Using_CC_GPR:
   case RISCV::Select_FPR64_Using_CC_GPR:
+  case RISCV::Select_FPR64INX_Using_CC_GPR:
     return emitSelectPseudo(MI, BB, Subtarget);
   case RISCV::BuildPairF64Pseudo:
     return emitBuildPairF64Pseudo(MI, BB, Subtarget);
@@ -13180,8 +13195,12 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return emitQuietFCMP(MI, BB, RISCV::FLT_S_INX, RISCV::FEQ_S_INX, Subtarget);
   case RISCV::PseudoQuietFLE_D:
     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
+  case RISCV::PseudoQuietFLE_D_INX:
+    return emitQuietFCMP(MI, BB, RISCV::FLE_D_INX, RISCV::FEQ_D_INX, Subtarget);
   case RISCV::PseudoQuietFLT_D:
     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
+  case RISCV::PseudoQuietFLT_D_INX:
+    return emitQuietFCMP(MI, BB, RISCV::FLT_D_INX, RISCV::FEQ_D_INX, Subtarget);
 
     // =========================================================================
     // VFCVT
@@ -13365,6 +13384,7 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case RISCV::PseudoFROUND_S:
   case RISCV::PseudoFROUND_S_INX:
   case RISCV::PseudoFROUND_D:
+  case RISCV::PseudoFROUND_D_INX:
     return emitFROUND(MI, BB, Subtarget);
   }
 }
@@ -15616,7 +15636,7 @@ bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
   case MVT::f32:
     return Subtarget.hasStdExtFOrZfinx();
   case MVT::f64:
-    return Subtarget.hasStdExtD();
+    return Subtarget.hasStdExtDOrZdinx();
   default:
     break;
   }

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index f2f547e991d6..dbde8725c178 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1271,6 +1271,7 @@ bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
   case RISCV::FSGNJ_D:
   case RISCV::FSGNJ_S:
   case RISCV::FSGNJ_H:
+  case RISCV::FSGNJ_D_INX:
   case RISCV::FSGNJ_S_INX:
     // The canonical floating-point move is fsgnj rd, rs, rs.
     return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
@@ -1301,6 +1302,7 @@ RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
   case RISCV::FSGNJ_D:
   case RISCV::FSGNJ_S:
   case RISCV::FSGNJ_H:
+  case RISCV::FSGNJ_D_INX:
   case RISCV::FSGNJ_S_INX:
     // The canonical floating-point move is fsgnj rd, rs, rs.
     if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index b2fba9d62075..45aa1e5d6fdd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -80,6 +80,8 @@ def XD_IN32X : ExtInfo_rr<Zdinx32Ext, GPR,        FPR64IN32X>;
 def XD_64    : ExtInfo_rr<D64Ext,     GPR,        FPR64>;
 
 defvar DINX    = [D,     D_INX,  D_IN32X];
+// TODO: Remove DIN64X when Zdinx for RV32 supported
+defvar DIN64X  = [D,     D_INX];
 defvar DDINX   = [DD,    DD_INX, DD_IN32X];
 defvar DXINX   = [DX,    DX_INX, DX_IN32X];
 defvar DFINX   = [DF,    DF_INX, DF_IN32X];
@@ -218,6 +220,10 @@ def : InstAlias<"fgt.d $rd, $rs, $rt",
                 (FLT_D_INX GPR:$rd, FPR64INX:$rt, FPR64INX:$rs), 0>;
 def : InstAlias<"fge.d $rd, $rs, $rt",
                 (FLE_D_INX GPR:$rd, FPR64INX:$rt, FPR64INX:$rs), 0>;
+let usesCustomInserter = 1 in {
+def PseudoQuietFLE_D_INX : PseudoQuietFCMP<FPR64INX>;
+def PseudoQuietFLT_D_INX : PseudoQuietFCMP<FPR64INX>;
+}
 } // Predicates = [HasStdExtZdinx, IsRV64]
 
 let Predicates = [HasStdExtZdinx, IsRV32] in {
@@ -241,17 +247,27 @@ let Predicates = [HasStdExtD] in {
 // f64 -> f32, f32 -> f64
 def : Pat<(any_fpround FPR64:$rs1), (FCVT_S_D FPR64:$rs1, FRM_DYN)>;
 def : Pat<(any_fpextend FPR32:$rs1), (FCVT_D_S FPR32:$rs1)>;
+} // Predicates = [HasStdExtD]
+
+let Predicates = [HasStdExtZdinx, IsRV64] in {
+/// Float conversion operations
+
+// f64 -> f32, f32 -> f64
+def : Pat<(any_fpround FPR64INX:$rs1), (FCVT_S_D_INX FPR64INX:$rs1, FRM_DYN)>;
+def : Pat<(any_fpextend FPR32INX:$rs1), (FCVT_D_S_INX FPR32INX:$rs1)>;
+} // Predicates = [HasStdExtZdinx, IsRV64]
 
 // [u]int<->double conversion patterns must be gated on IsRV32 or IsRV64, so
 // are defined later.
 
 /// Float arithmetic operations
 
-def : PatFprFprDynFrm<any_fadd, FADD_D, FPR64>;
-def : PatFprFprDynFrm<any_fsub, FSUB_D, FPR64>;
-def : PatFprFprDynFrm<any_fmul, FMUL_D, FPR64>;
-def : PatFprFprDynFrm<any_fdiv, FDIV_D, FPR64>;
+defm : PatFprFprDynFrm_m<any_fadd, FADD_D, DINX>;
+defm : PatFprFprDynFrm_m<any_fsub, FSUB_D, DINX>;
+defm : PatFprFprDynFrm_m<any_fmul, FMUL_D, DINX>;
+defm : PatFprFprDynFrm_m<any_fdiv, FDIV_D, DINX>;
 
+let Predicates = [HasStdExtD] in {
 def : Pat<(any_fsqrt FPR64:$rs1), (FSQRT_D FPR64:$rs1, FRM_DYN)>;
 
 def : Pat<(fneg FPR64:$rs1), (FSGNJN_D $rs1, $rs1)>;
@@ -284,25 +300,64 @@ def : Pat<(any_fma (fneg FPR64:$rs1), FPR64:$rs2, (fneg FPR64:$rs3)),
 // fnmadd: -(rs1 * rs2 + rs3) (the nsz flag on the FMA)
 def : Pat<(fneg (any_fma_nsz FPR64:$rs1, FPR64:$rs2, FPR64:$rs3)),
           (FNMADD_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, FRM_DYN)>;
+} // Predicates = [HasStdExtD]
+
+let Predicates = [HasStdExtZdinx, IsRV64] in {
+def : Pat<(any_fsqrt FPR64INX:$rs1), (FSQRT_D_INX FPR64INX:$rs1, FRM_DYN)>;
+
+def : Pat<(fneg FPR64INX:$rs1), (FSGNJN_D_INX $rs1, $rs1)>;
+def : Pat<(fabs FPR64INX:$rs1), (FSGNJX_D_INX $rs1, $rs1)>;
+
+def : Pat<(riscv_fpclass FPR64INX:$rs1), (FCLASS_D_INX $rs1)>;
+
+def : PatFprFpr<fcopysign, FSGNJ_D_INX, FPR64INX>;
+def : Pat<(fcopysign FPR64INX:$rs1, (fneg FPR64INX:$rs2)),
+          (FSGNJN_D_INX $rs1, $rs2)>;
+def : Pat<(fcopysign FPR64INX:$rs1, FPR32INX:$rs2),
+          (FSGNJ_D_INX $rs1, (FCVT_D_S_INX $rs2))>;
+def : Pat<(fcopysign FPR32INX:$rs1, FPR64INX:$rs2),
+          (FSGNJ_S_INX $rs1, (FCVT_S_D_INX $rs2, FRM_DYN))>;
+
+// fmadd: rs1 * rs2 + rs3
+def : Pat<(any_fma FPR64INX:$rs1, FPR64INX:$rs2, FPR64INX:$rs3),
+          (FMADD_D_INX $rs1, $rs2, $rs3, FRM_DYN)>;
+
+// fmsub: rs1 * rs2 - rs3
+def : Pat<(any_fma FPR64INX:$rs1, FPR64INX:$rs2, (fneg FPR64INX:$rs3)),
+          (FMSUB_D_INX FPR64INX:$rs1, FPR64INX:$rs2, FPR64INX:$rs3, FRM_DYN)>;
+
+// fnmsub: -rs1 * rs2 + rs3
+def : Pat<(any_fma (fneg FPR64INX:$rs1), FPR64INX:$rs2, FPR64INX:$rs3),
+          (FNMSUB_D_INX FPR64INX:$rs1, FPR64INX:$rs2, FPR64INX:$rs3, FRM_DYN)>;
+
+// fnmadd: -rs1 * rs2 - rs3
+def : Pat<(any_fma (fneg FPR64INX:$rs1), FPR64INX:$rs2, (fneg FPR64INX:$rs3)),
+          (FNMADD_D_INX FPR64INX:$rs1, FPR64INX:$rs2, FPR64INX:$rs3, FRM_DYN)>;
+
+// fnmadd: -(rs1 * rs2 + rs3) (the nsz flag on the FMA)
+def : Pat<(fneg (any_fma_nsz FPR64INX:$rs1, FPR64INX:$rs2, FPR64INX:$rs3)),
+          (FNMADD_D_INX FPR64INX:$rs1, FPR64INX:$rs2, FPR64INX:$rs3, FRM_DYN)>;
+} // Predicates = [HasStdExtZdinx, IsRV64]
 
 // The ratified 20191213 ISA spec defines fmin and fmax in a way that matches
 // LLVM's fminnum and fmaxnum.
 // <https://github.com/riscv/riscv-isa-manual/commit/cd20cee7efd9bac7c5aa127ec3b451749d2b3cce>.
-def : PatFprFpr<fminnum, FMIN_D, FPR64>;
-def : PatFprFpr<fmaxnum, FMAX_D, FPR64>;
+defm : PatFprFpr_m<fminnum, FMIN_D, DINX>;
+defm : PatFprFpr_m<fmaxnum, FMAX_D, DINX>;
 
 /// Setcc
 // FIXME: SETEQ/SETLT/SETLE imply nonans, can we pick better instructions for
 // strict versions of those.
 
 // Match non-signaling FEQ_D
-def : PatSetCC<FPR64, any_fsetcc, SETEQ, FEQ_D>;
-def : PatSetCC<FPR64, any_fsetcc, SETOEQ, FEQ_D>;
-def : PatSetCC<FPR64, strict_fsetcc, SETLT, PseudoQuietFLT_D>;
-def : PatSetCC<FPR64, strict_fsetcc, SETOLT, PseudoQuietFLT_D>;
-def : PatSetCC<FPR64, strict_fsetcc, SETLE, PseudoQuietFLE_D>;
-def : PatSetCC<FPR64, strict_fsetcc, SETOLE, PseudoQuietFLE_D>;
+defm : PatSetCC_m<any_fsetcc,    SETEQ,  FEQ_D,            DINX>;
+defm : PatSetCC_m<any_fsetcc,    SETOEQ, FEQ_D,            DINX>;
+defm : PatSetCC_m<strict_fsetcc, SETLT,  PseudoQuietFLT_D, DIN64X>;
+defm : PatSetCC_m<strict_fsetcc, SETOLT, PseudoQuietFLT_D, DIN64X>;
+defm : PatSetCC_m<strict_fsetcc, SETLE,  PseudoQuietFLE_D, DIN64X>;
+defm : PatSetCC_m<strict_fsetcc, SETOLE, PseudoQuietFLE_D, DIN64X>;
 
+let Predicates = [HasStdExtD] in {
 // Match signaling FEQ_D
 def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETEQ),
           (AND (FLE_D $rs1, $rs2),
@@ -320,7 +375,29 @@ def : PatSetCC<FPR64, any_fsetccs, SETLT, FLT_D>;
 def : PatSetCC<FPR64, any_fsetccs, SETOLT, FLT_D>;
 def : PatSetCC<FPR64, any_fsetccs, SETLE, FLE_D>;
 def : PatSetCC<FPR64, any_fsetccs, SETOLE, FLE_D>;
+} // Predicates = [HasStdExtD]
+
+let Predicates = [HasStdExtZdinx, IsRV64] in {
+// Match signaling FEQ_D
+def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs2, SETEQ),
+          (AND (FLE_D_INX $rs1, $rs2),
+               (FLE_D_INX $rs2, $rs1))>;
+def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs2, SETOEQ),
+          (AND (FLE_D_INX $rs1, $rs2),
+               (FLE_D_INX $rs2, $rs1))>;
+// If both operands are the same, use a single FLE.
+def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs1, SETEQ),
+          (FLE_D_INX $rs1, $rs1)>;
+def : Pat<(strict_fsetccs FPR64INX:$rs1, FPR64INX:$rs1, SETOEQ),
+          (FLE_D_INX $rs1, $rs1)>;
+
+def : PatSetCC<FPR64INX, any_fsetccs, SETLT,  FLT_D_INX>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETOLT, FLT_D_INX>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETLE,  FLE_D_INX>;
+def : PatSetCC<FPR64INX, any_fsetccs, SETOLE, FLE_D_INX>;
+} // Predicates = [HasStdExtZdinx, IsRV64]
 
+let Predicates = [HasStdExtD] in {
 defm Select_FPR64 : SelectCC_GPR_rrirr<FPR64>;
 
 def PseudoFROUND_D : PseudoFROUND<FPR64>;
@@ -349,6 +426,20 @@ def SplitF64Pseudo
 
 } // Predicates = [HasStdExtD]
 
+let Predicates = [HasStdExtZdinx, IsRV64] in {
+defm Select_FPR64INX : SelectCC_GPR_rrirr<FPR64INX>;
+
+def PseudoFROUND_D_INX : PseudoFROUND<FPR64INX>;
+
+/// Loads
+def : Pat<(f64 (load (AddrRegImm GPR:$rs1, simm12:$imm12))),
+          (COPY_TO_REGCLASS (LD GPR:$rs1, simm12:$imm12), GPRF64)>;
+
+/// Stores
+def : Pat<(store (f64 FPR64INX:$rs2), (AddrRegImm GPR:$rs1, simm12:$imm12)),
+          (SD (COPY_TO_REGCLASS FPR64INX:$rs2, GPR), GPR:$rs1, simm12:$imm12)>;
+} // Predicates = [HasStdExtZdinx, IsRV64]
+
 let Predicates = [HasStdExtD, IsRV32] in {
 
 // double->[u]int. Round-to-zero must be used.
@@ -406,3 +497,40 @@ def : Pat<(i64 (any_llround FPR64:$rs1)), (FCVT_L_D $rs1, FRM_RMM)>;
 def : Pat<(any_sint_to_fp (i64 GPR:$rs1)), (FCVT_D_L GPR:$rs1, FRM_DYN)>;
 def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_D_LU GPR:$rs1, FRM_DYN)>;
 } // Predicates = [HasStdExtD, IsRV64]
+
+let Predicates = [HasStdExtZdinx, IsRV64] in {
+
+// Moves (no conversion)
+def : Pat<(f64 (bitconvert (i64 GPR:$rs1))), (COPY_TO_REGCLASS GPR:$rs1, GPRF64)>;
+def : Pat<(i64 (bitconvert FPR64INX:$rs1)), (COPY_TO_REGCLASS FPR64INX:$rs1, GPR)>;
+
+// Use target specific isd nodes to help us remember the result is sign
+// extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be
+// duplicated if it has another user that didn't need the sign_extend.
+def : Pat<(riscv_any_fcvt_w_rv64 FPR64INX:$rs1, timm:$frm),  (FCVT_W_D_INX $rs1, timm:$frm)>;
+def : Pat<(riscv_any_fcvt_wu_rv64 FPR64INX:$rs1, timm:$frm), (FCVT_WU_D_INX $rs1, timm:$frm)>;
+
+// [u]int32->fp
+def : Pat<(any_sint_to_fp (i64 (sexti32 (i64 GPR:$rs1)))), (FCVT_D_W_INX $rs1)>;
+def : Pat<(any_uint_to_fp (i64 (zexti32 (i64 GPR:$rs1)))), (FCVT_D_WU_INX $rs1)>;
+
+// Saturating double->[u]int64.
+def : Pat<(i64 (riscv_fcvt_x FPR64INX:$rs1, timm:$frm)), (FCVT_L_D_INX $rs1, timm:$frm)>;
+def : Pat<(i64 (riscv_fcvt_xu FPR64INX:$rs1, timm:$frm)), (FCVT_LU_D_INX $rs1, timm:$frm)>;
+
+// double->[u]int64. Round-to-zero must be used.
+def : Pat<(i64 (any_fp_to_sint FPR64INX:$rs1)), (FCVT_L_D_INX FPR64INX:$rs1, FRM_RTZ)>;
+def : Pat<(i64 (any_fp_to_uint FPR64INX:$rs1)), (FCVT_LU_D_INX FPR64INX:$rs1, FRM_RTZ)>;
+
+// double->int64 with current rounding mode.
+def : Pat<(i64 (any_lrint FPR64INX:$rs1)), (FCVT_L_D_INX $rs1, FRM_DYN)>;
+def : Pat<(i64 (any_llrint FPR64INX:$rs1)), (FCVT_L_D_INX $rs1, FRM_DYN)>;
+
+// double->int64 rounded to nearest with ties rounded away from zero.
+def : Pat<(i64 (any_lround FPR64INX:$rs1)), (FCVT_L_D_INX $rs1, FRM_RMM)>;
+def : Pat<(i64 (any_llround FPR64INX:$rs1)), (FCVT_L_D_INX $rs1, FRM_RMM)>;
+
+// [u]int64->fp. Match GCC and default to using dynamic rounding mode.
+def : Pat<(any_sint_to_fp (i64 GPR:$rs1)), (FCVT_D_L_INX GPR:$rs1, FRM_DYN)>;
+def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_D_LU_INX GPR:$rs1, FRM_DYN)>;
+} // Predicates = [HasStdExtZdinx, IsRV64]

diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 1e5d5e0d84ff..576b185e7473 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -114,6 +114,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool hasStdExtCOrZca() const { return HasStdExtC || HasStdExtZca; }
   bool hasStdExtZvl() const { return ZvlLen != 0; }
   bool hasStdExtFOrZfinx() const { return HasStdExtF || HasStdExtZfinx; }
+  bool hasStdExtDOrZdinx() const { return HasStdExtD || HasStdExtZdinx; }
   bool hasStdExtZfhOrZfhmin() const { return HasStdExtZfh || HasStdExtZfhmin; }
   bool is64Bit() const { return IsRV64; }
   MVT getXLenVT() const { return XLenVT; }

diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
index aa278b7513e4..2e84e24470f6 100644
--- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll
@@ -5,6 +5,9 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
 ; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
+; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -16,6 +19,11 @@ define double @fadd_d(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    fadd.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fadd_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fadd_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -44,6 +52,11 @@ define double @fsub_d(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    fsub.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fsub_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fsub.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fsub_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -72,6 +85,11 @@ define double @fmul_d(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    fmul.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmul_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmul.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmul_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -100,6 +118,11 @@ define double @fdiv_d(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    fdiv.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fdiv_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fdiv.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fdiv_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -128,6 +151,11 @@ define double @fsqrt_d(double %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fsqrt.d fa0, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fsqrt_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fsqrt.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fsqrt_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -169,6 +197,15 @@ define double @fmin_d(double %a, double %b) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmin_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call fmin@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmin_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -210,6 +247,15 @@ define double @fmax_d(double %a, double %b) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmax_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call fmax@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmax_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -238,6 +284,11 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmadd_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmadd_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -275,6 +326,12 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmsub_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV64IZFINXZDINX-NEXT:    fmsub.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmsub_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -353,6 +410,13 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmadd_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV64IZFINXZDINX-NEXT:    fnmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmadd_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -449,6 +513,13 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa0, fa5
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmadd_d_2:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a1, a1, zero
+; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV64IZFINXZDINX-NEXT:    fnmadd.d a0, a1, a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmadd_d_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -544,6 +615,12 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp {
 ; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmsub_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV64IZFINXZDINX-NEXT:    fnmsub.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmsub_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -616,6 +693,12 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp {
 ; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa0, fa2
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmsub_d_2:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a1, a1, zero
+; RV64IZFINXZDINX-NEXT:    fnmsub.d a0, a1, a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmsub_d_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32

diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index 708271c0f43f..1ba56c80d808 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefix=RV64IZFINXZDINX %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -19,6 +21,11 @@ define double @fadd_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fadd.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fadd_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fadd_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -46,6 +53,11 @@ define double @fsub_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fsub.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fsub_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fsub.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fsub_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -73,6 +85,11 @@ define double @fmul_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fmul.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmul_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmul.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmul_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -100,6 +117,11 @@ define double @fdiv_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fdiv.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fdiv_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fdiv.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fdiv_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -129,6 +151,11 @@ define double @fsqrt_d(double %a) nounwind {
 ; CHECKIFD-NEXT:    fsqrt.d fa0, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fsqrt_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fsqrt.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fsqrt_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -158,6 +185,11 @@ define double @fsgnj_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fsgnj.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fsgnj_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fsgnj_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
@@ -189,6 +221,13 @@ define i32 @fneg_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    feq.d a0, fa5, fa4
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fneg_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    fneg.d a1, a0
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fneg_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -235,6 +274,14 @@ define double @fsgnjn_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fsgnjn.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fsgnjn_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    li a2, -1
+; RV64IZFINXZDINX-NEXT:    slli a2, a2, 63
+; RV64IZFINXZDINX-NEXT:    xor a1, a1, a2
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fsgnjn_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    not a2, a3
@@ -271,6 +318,13 @@ define double @fabs_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fadd.d fa0, fa4, fa5
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fabs_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    fabs.d a1, a0
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fabs_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -311,6 +365,11 @@ define double @fmin_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fmin.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmin_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmin.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmin_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -340,6 +399,11 @@ define double @fmax_d(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fmax.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmax_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmax.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmax_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -369,6 +433,11 @@ define double @fmadd_d(double %a, double %b, double %c) nounwind {
 ; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmadd_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmadd_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -405,6 +474,12 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmsub_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV64IZFINXZDINX-NEXT:    fmsub.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmsub_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -483,6 +558,13 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmadd_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV64IZFINXZDINX-NEXT:    fnmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmadd_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -579,6 +661,13 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa0, fa5
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmadd_d_2:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a1, a1, zero
+; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV64IZFINXZDINX-NEXT:    fnmadd.d a0, a1, a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmadd_d_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -666,6 +755,14 @@ define double @fnmadd_d_3(double %a, double %b, double %c) nounwind {
 ; CHECKIFD-NEXT:    fneg.d fa0, fa5
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmadd_d_3:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    li a1, -1
+; RV64IZFINXZDINX-NEXT:    slli a1, a1, 63
+; RV64IZFINXZDINX-NEXT:    xor a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmadd_d_3:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -700,6 +797,14 @@ define double @fnmadd_nsz(double %a, double %b, double %c) nounwind {
 ; CHECKIFD-NEXT:    fnmadd.d fa0, fa0, fa1, fa2
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmadd_nsz:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    li a1, -1
+; RV64IZFINXZDINX-NEXT:    slli a1, a1, 63
+; RV64IZFINXZDINX-NEXT:    xor a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmadd_nsz:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -742,6 +847,12 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa1, fa2
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmsub_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV64IZFINXZDINX-NEXT:    fnmsub.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmsub_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -814,6 +925,12 @@ define double @fnmsub_d_2(double %a, double %b, double %c) nounwind {
 ; RV64IFD-NEXT:    fnmsub.d fa0, fa5, fa0, fa2
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmsub_d_2:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a1, a1, zero
+; RV64IZFINXZDINX-NEXT:    fnmsub.d a0, a1, a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmsub_d_2:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -881,6 +998,11 @@ define double @fmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmadd_d_contract:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmadd_d_contract:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -932,6 +1054,12 @@ define double @fmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64IFD-NEXT:    fmsub.d fa0, fa0, fa1, fa5
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmsub_d_contract:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV64IZFINXZDINX-NEXT:    fmsub.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmsub_d_contract:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -1020,6 +1148,14 @@ define double @fnmadd_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64IFD-NEXT:    fnmadd.d fa0, fa4, fa3, fa5
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmadd_d_contract:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV64IZFINXZDINX-NEXT:    fadd.d a1, a1, zero
+; RV64IZFINXZDINX-NEXT:    fadd.d a2, a2, zero
+; RV64IZFINXZDINX-NEXT:    fnmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmadd_d_contract:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -1133,6 +1269,13 @@ define double @fnmsub_d_contract(double %a, double %b, double %c) nounwind {
 ; RV64IFD-NEXT:    fnmsub.d fa0, fa4, fa5, fa2
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fnmsub_d_contract:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, zero
+; RV64IZFINXZDINX-NEXT:    fadd.d a1, a1, zero
+; RV64IZFINXZDINX-NEXT:    fnmsub.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fnmsub_d_contract:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32

diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
index 5bf27f5dded3..b294f88df84f 100644
--- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
@@ -7,6 +7,8 @@
 ; RUN:   | FileCheck -check-prefix=RV64I %s
 ; RUN: llc -mtriple=riscv64 -target-abi=lp64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64IFD %s
+; RUN: llc -mtriple=riscv64 -target-abi=lp64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
 ;
 ; This file tests cases where simple floating point operations can be
 ; profitably handled though bit manipulation if a soft-float ABI is being used
@@ -43,6 +45,13 @@ define double @fneg(double %a) nounwind {
 ; RV64IFD-NEXT:    slli a1, a1, 63
 ; RV64IFD-NEXT:    xor a0, a0, a1
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fneg:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    li a1, -1
+; RV64IZFINXZDINX-NEXT:    slli a1, a1, 63
+; RV64IZFINXZDINX-NEXT:    xor a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %1 = fneg double %a
   ret double %1
 }
@@ -73,6 +82,12 @@ define double @fabs(double %a) nounwind {
 ; RV64IFD-NEXT:    slli a0, a0, 1
 ; RV64IFD-NEXT:    srli a0, a0, 1
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fabs:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    slli a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    srli a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
   %1 = call double @llvm.fabs.f64(double %a)
   ret double %1
 }
@@ -130,6 +145,14 @@ define double @fcopysign_fneg(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fsgnj.d fa5, fa4, fa5
 ; RV64IFD-NEXT:    fmv.x.d a0, fa5
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fcopysign_fneg:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    li a2, -1
+; RV64IZFINXZDINX-NEXT:    slli a2, a2, 63
+; RV64IZFINXZDINX-NEXT:    xor a1, a1, a2
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %1 = fneg double %b
   %2 = call double @llvm.copysign.f64(double %a, double %1)
   ret double %2

diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index 513221301d65..dd68a94f7836 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck -check-prefix=RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefix=RV64IZFINXZDINX %s
 
 declare void @abort()
 declare void @exit(i32)
@@ -29,6 +31,17 @@ define void @br_fcmp_false(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_false:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    li a0, 1
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB0_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.then
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB0_2: # %if.else
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp false double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -60,6 +73,17 @@ define void @br_fcmp_oeq(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_oeq:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB1_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB1_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -94,6 +118,17 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_oeq_alt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB2_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB2_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -125,6 +160,17 @@ define void @br_fcmp_ogt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_ogt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB3_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB3_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp ogt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -156,6 +202,17 @@ define void @br_fcmp_oge(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_oge:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB4_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB4_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp oge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -187,6 +244,17 @@ define void @br_fcmp_olt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_olt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB5_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB5_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp olt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -218,6 +286,17 @@ define void @br_fcmp_ole(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_ole:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB6_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB6_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp ole double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -253,6 +332,19 @@ define void @br_fcmp_one(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_one:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a2, a0, a1
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    or a0, a0, a2
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB7_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB7_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp one double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -288,6 +380,19 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_ord:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    feq.d a1, a1, a1
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB8_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB8_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp ord double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -323,6 +428,19 @@ define void @br_fcmp_ueq(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_ueq:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a2, a0, a1
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    or a0, a0, a2
+; RV64IZFINXZDINX-NEXT:    beqz a0, .LBB9_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB9_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp ueq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -354,6 +472,17 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_ugt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    beqz a0, .LBB10_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB10_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp ugt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -385,6 +514,17 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_uge:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    beqz a0, .LBB11_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB11_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp uge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -416,6 +556,17 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_ult:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    beqz a0, .LBB12_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB12_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp ult double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -447,6 +598,17 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_ule:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    beqz a0, .LBB13_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB13_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp ule double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -478,6 +640,17 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_une:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    beqz a0, .LBB14_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB14_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp une double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -513,6 +686,19 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_uno:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    feq.d a1, a1, a1
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    beqz a0, .LBB15_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB15_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp uno double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -544,6 +730,17 @@ define void @br_fcmp_true(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, -16
 ; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IFD-NEXT:    call abort@plt
+;
+; RV64IZFINXZDINX-LABEL: br_fcmp_true:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    li a0, 1
+; RV64IZFINXZDINX-NEXT:    bnez a0, .LBB16_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    ret
+; RV64IZFINXZDINX-NEXT:  .LBB16_2: # %if.then
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call abort@plt
   %1 = fcmp true double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:

diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
index 1c8291d3ac27..bd78dc5ecd25 100644
--- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -5,6 +5,9 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
 ; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
+; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -20,6 +23,11 @@ define float @fcvt_s_d(double %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.s.d fa0, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_s_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.s.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_s_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -48,6 +56,11 @@ define double @fcvt_d_s(float %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.d.s fa0, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_s:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.s a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_s:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -76,6 +89,11 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_w_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_w_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -106,6 +124,11 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -138,6 +161,13 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
 ; CHECKIFD-NEXT:    add a0, a0, a1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    seqz a1, a0
+; RV64IZFINXZDINX-NEXT:    add a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_d_multiple_use:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -171,6 +201,11 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -201,6 +236,12 @@ define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w_load:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lw a0, 0(a0)
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w_load:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -231,6 +272,11 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.d.wu fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -267,6 +313,12 @@ define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
 ; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lwu a0, 0(a0)
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu_load:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -306,6 +358,11 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_l_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_l_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -343,6 +400,11 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_lu_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_lu_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -380,6 +442,11 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
 ; RV64IFD-NEXT:    fcvt.d.l fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_l:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_l:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -417,6 +484,11 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
 ; RV64IFD-NEXT:    fcvt.d.lu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_lu:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.lu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_lu:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -445,6 +517,11 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -473,6 +550,11 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.d.wu fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -501,6 +583,11 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -529,6 +616,11 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fcvt.d.wu fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -567,6 +659,15 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64IFD-NEXT:    fsd fa5, 0(a1)
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addiw a2, a0, 1
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    sd a0, 0(a1)
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w_demanded_bits:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -625,6 +726,13 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64IFD-NEXT:    fsd fa5, 0(a1)
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addiw a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a2, a0
+; RV64IZFINXZDINX-NEXT:    sd a2, 0(a1)
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu_demanded_bits:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16

diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index f207d8b615ff..249395f1e36e 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -14,6 +16,11 @@ define float @fcvt_s_d(double %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.s.d fa0, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_s_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.s.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_s_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -41,6 +48,11 @@ define double @fcvt_d_s(float %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.d.s fa0, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_s:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.s a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_s:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -68,6 +80,11 @@ define i32 @fcvt_w_d(double %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_w_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_w_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -99,6 +116,15 @@ define i32 @fcvt_w_d_sat(double %a) nounwind {
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_w_d_sat:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_w_d_sat:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -209,6 +235,11 @@ define i32 @fcvt_wu_d(double %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -240,6 +271,13 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
 ; CHECKIFD-NEXT:    add a0, a0, a1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    seqz a1, a0
+; RV64IZFINXZDINX-NEXT:    add a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_d_multiple_use:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -288,6 +326,17 @@ define i32 @fcvt_wu_d_sat(double %a) nounwind {
 ; RV64IFD-NEXT:    srli a0, a0, 32
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_d_sat:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addiw a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    slli a0, a0, 32
+; RV64IZFINXZDINX-NEXT:    srli a0, a0, 32
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_d_sat:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -372,6 +421,11 @@ define double @fcvt_d_w(i32 %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -401,6 +455,12 @@ define double @fcvt_d_w_load(ptr %p) nounwind {
 ; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w_load:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lw a0, 0(a0)
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w_load:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -431,6 +491,11 @@ define double @fcvt_d_wu(i32 %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.d.wu fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -466,6 +531,12 @@ define double @fcvt_d_wu_load(ptr %p) nounwind {
 ; RV64IFD-NEXT:    fcvt.d.wu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lwu a0, 0(a0)
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu_load:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -505,6 +576,11 @@ define i64 @fcvt_l_d(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_l_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_l_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -574,6 +650,15 @@ define i64 @fcvt_l_d_sat(double %a) nounwind {
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_l_d_sat:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_l_d_sat:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -700,6 +785,11 @@ define i64 @fcvt_lu_d(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_lu_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_lu_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -756,6 +846,15 @@ define i64 @fcvt_lu_d_sat(double %a) nounwind {
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_lu_d_sat:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_lu_d_sat:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -844,6 +943,11 @@ define i64 @fmv_x_d(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.x.d a0, fa5
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmv_x_d:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmv_x_d:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -881,6 +985,11 @@ define double @fcvt_d_l(i64 %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.d.l fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_l:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_l:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -917,6 +1026,11 @@ define double @fcvt_d_lu(i64 %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.d.lu fa0, a0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_lu:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.lu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_lu:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -960,6 +1074,11 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
 ; RV64IFD-NEXT:    fadd.d fa0, fa5, fa4
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmv_d_x:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmv_d_x:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -989,6 +1108,11 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1016,6 +1140,11 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.d.wu fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1043,6 +1172,11 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.d.w fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1070,6 +1204,11 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.d.wu fa0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1107,6 +1246,15 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64IFD-NEXT:    fsd fa5, 0(a1)
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addiw a2, a0, 1
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    fcvt.d.w a0, a0
+; RV64IZFINXZDINX-NEXT:    sd a0, 0(a1)
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_w_demanded_bits:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1165,6 +1313,13 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64IFD-NEXT:    fsd fa5, 0(a1)
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addiw a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    fcvt.d.wu a2, a0
+; RV64IZFINXZDINX-NEXT:    sd a2, 0(a1)
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_d_wu_demanded_bits:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1218,6 +1373,11 @@ define signext i16 @fcvt_w_s_i16(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_w_s_i16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_w_s_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1268,6 +1428,20 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
 ; RV64IFD-NEXT:    and a0, a0, a1
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i16:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI26_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI26_0)(a1)
+; RV64IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI26_1)
+; RV64IZFINXZDINX-NEXT:    ld a2, %lo(.LCPI26_1)(a2)
+; RV64IZFINXZDINX-NEXT:    fmax.d a1, a0, a1
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    neg a0, a0
+; RV64IZFINXZDINX-NEXT:    fmin.d a1, a1, a2
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a1, rtz
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_w_s_sat_i16:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -1383,6 +1557,11 @@ define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_s_i16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_s_i16:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1425,6 +1604,15 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa5, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i16:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI28_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI28_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fmax.d a0, a0, zero
+; RV64IZFINXZDINX-NEXT:    fmin.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_s_sat_i16:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -1523,6 +1711,11 @@ define signext i8 @fcvt_w_s_i8(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_w_s_i8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_w_s_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1573,6 +1766,20 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
 ; RV64IFD-NEXT:    and a0, a0, a1
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i8:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI30_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI30_0)(a1)
+; RV64IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI30_1)
+; RV64IZFINXZDINX-NEXT:    ld a2, %lo(.LCPI30_1)(a2)
+; RV64IZFINXZDINX-NEXT:    fmax.d a1, a0, a1
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    neg a0, a0
+; RV64IZFINXZDINX-NEXT:    fmin.d a1, a1, a2
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a1, rtz
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_w_s_sat_i8:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -1685,6 +1892,11 @@ define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_s_i8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_s_i8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1729,6 +1941,15 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa5, rtz
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i8:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI32_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI32_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fmax.d a0, a0, zero
+; RV64IZFINXZDINX-NEXT:    fmin.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_s_sat_i8:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -1831,6 +2052,17 @@ define zeroext i32 @fcvt_wu_d_sat_zext(double %a) nounwind {
 ; RV64IFD-NEXT:    srli a0, a0, 32
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_wu_d_sat_zext:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addiw a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    slli a0, a0, 32
+; RV64IZFINXZDINX-NEXT:    srli a0, a0, 32
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_wu_d_sat_zext:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -1920,6 +2152,15 @@ define signext i32 @fcvt_w_d_sat_sext(double %a) nounwind {
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcvt_w_d_sat_sext:
+; RV64IZFINXZDINX:       # %bb.0: # %start
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcvt_w_d_sat_sext:
 ; RV32I:       # %bb.0: # %start
 ; RV32I-NEXT:    addi sp, sp, -32

diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
index e6cf098eae57..0cac56200578 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll
@@ -5,6 +5,9 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
 ; RUN:   | FileCheck -check-prefix=CHECKIFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
+; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -16,6 +19,11 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d a0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_oeq:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_oeq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -50,6 +58,15 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_ogt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a3, fflags
+; RV64IZFINXZDINX-NEXT:    flt.d a2, a1, a0
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a3
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ogt:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -83,6 +100,15 @@ define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_oge:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a3, fflags
+; RV64IZFINXZDINX-NEXT:    fle.d a2, a1, a0
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a3
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_oge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -118,6 +144,15 @@ define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_olt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a3, fflags
+; RV64IZFINXZDINX-NEXT:    flt.d a2, a0, a1
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a3
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_olt:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -151,6 +186,15 @@ define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_ole:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a3, fflags
+; RV64IZFINXZDINX-NEXT:    fle.d a2, a0, a1
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a3
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ole:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -191,6 +235,20 @@ define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_one:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a2, fflags
+; RV64IZFINXZDINX-NEXT:    flt.d a3, a0, a1
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a2
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
+; RV64IZFINXZDINX-NEXT:    csrr a2, fflags
+; RV64IZFINXZDINX-NEXT:    flt.d a4, a1, a0
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a2
+; RV64IZFINXZDINX-NEXT:    or a2, a4, a3
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_one:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -257,6 +315,13 @@ define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_ord:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    feq.d a1, a1, a1
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ord:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -298,6 +363,21 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_ueq:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a2, fflags
+; RV64IZFINXZDINX-NEXT:    flt.d a3, a0, a1
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a2
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
+; RV64IZFINXZDINX-NEXT:    csrr a2, fflags
+; RV64IZFINXZDINX-NEXT:    flt.d a4, a1, a0
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a2
+; RV64IZFINXZDINX-NEXT:    or a3, a4, a3
+; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ueq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -366,6 +446,16 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_ugt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a2, fflags
+; RV64IZFINXZDINX-NEXT:    fle.d a3, a0, a1
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a2
+; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ugt:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -400,6 +490,16 @@ define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_uge:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a2, fflags
+; RV64IZFINXZDINX-NEXT:    flt.d a3, a0, a1
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a2
+; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_uge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -436,6 +536,16 @@ define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_ult:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a2, fflags
+; RV64IZFINXZDINX-NEXT:    fle.d a3, a1, a0
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a2
+; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ult:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -470,6 +580,16 @@ define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_ule:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    csrr a2, fflags
+; RV64IZFINXZDINX-NEXT:    flt.d a3, a1, a0
+; RV64IZFINXZDINX-NEXT:    csrw fflags, a2
+; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
+; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
+; RV64IZFINXZDINX-NEXT:    mv a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ule:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -501,6 +621,12 @@ define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_une:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_une:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -534,6 +660,14 @@ define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmp_uno:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    feq.d a1, a1, a1
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_uno:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -566,6 +700,13 @@ define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_oeq:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a2, a1, a0
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_oeq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -597,6 +738,11 @@ define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    flt.d a0, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_ogt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_ogt:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -627,6 +773,11 @@ define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    fle.d a0, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_oge:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_oge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -659,6 +810,11 @@ define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_olt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_olt:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -689,6 +845,11 @@ define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    fle.d a0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_ole:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_ole:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -721,6 +882,13 @@ define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    or a0, a1, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_one:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a2, a0, a1
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    or a0, a0, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_one:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -787,6 +955,13 @@ define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_ord:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a1, a1, a1
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_ord:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -820,6 +995,14 @@ define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_ueq:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a2, a0, a1
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    or a0, a0, a2
+; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_ueq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -885,6 +1068,12 @@ define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_ugt:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_ugt:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -916,6 +1105,12 @@ define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_uge:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_uge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -949,6 +1144,12 @@ define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_ult:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_ult:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -980,6 +1181,12 @@ define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_ule:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_ule:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1013,6 +1220,14 @@ define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_une:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a2, a1, a0
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a2
+; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_une:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1046,6 +1261,14 @@ define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fcmps_uno:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fle.d a1, a1, a1
+; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmps_uno:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16

diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index 9f59dd21106a..d22df14c5d7c 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=CHECKIFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck -check-prefix=CHECKIFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefix=CHECKIZFINXZDINX %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -14,6 +16,11 @@ define i32 @fcmp_false(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    li a0, 0
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_false:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    li a0, 0
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_false:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a0, 0
@@ -34,6 +41,11 @@ define i32 @fcmp_oeq(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    feq.d a0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_oeq:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    feq.d a0, a0, a1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_oeq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -64,6 +76,11 @@ define i32 @fcmp_ogt(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    flt.d a0, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_ogt:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ogt:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -94,6 +111,11 @@ define i32 @fcmp_oge(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fle.d a0, fa1, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_oge:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    fle.d a0, a1, a0
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_oge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -126,6 +148,11 @@ define i32 @fcmp_olt(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_olt:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    flt.d a0, a0, a1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_olt:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -156,6 +183,11 @@ define i32 @fcmp_ole(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fle.d a0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_ole:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    fle.d a0, a0, a1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ole:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -188,6 +220,13 @@ define i32 @fcmp_one(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    or a0, a1, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_one:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    flt.d a2, a0, a1
+; CHECKIZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; CHECKIZFINXZDINX-NEXT:    or a0, a0, a2
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_one:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -254,6 +293,13 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_ord:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    feq.d a1, a1, a1
+; CHECKIZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; CHECKIZFINXZDINX-NEXT:    and a0, a0, a1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ord:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -287,6 +333,14 @@ define i32 @fcmp_ueq(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_ueq:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    flt.d a2, a0, a1
+; CHECKIZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; CHECKIZFINXZDINX-NEXT:    or a0, a0, a2
+; CHECKIZFINXZDINX-NEXT:    xori a0, a0, 1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ueq:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -352,6 +406,12 @@ define i32 @fcmp_ugt(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_ugt:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    fle.d a0, a0, a1
+; CHECKIZFINXZDINX-NEXT:    xori a0, a0, 1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ugt:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -383,6 +443,12 @@ define i32 @fcmp_uge(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_uge:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    flt.d a0, a0, a1
+; CHECKIZFINXZDINX-NEXT:    xori a0, a0, 1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_uge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -416,6 +482,12 @@ define i32 @fcmp_ult(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_ult:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    fle.d a0, a1, a0
+; CHECKIZFINXZDINX-NEXT:    xori a0, a0, 1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ult:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -447,6 +519,12 @@ define i32 @fcmp_ule(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_ule:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    flt.d a0, a1, a0
+; CHECKIZFINXZDINX-NEXT:    xori a0, a0, 1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_ule:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -478,6 +556,12 @@ define i32 @fcmp_une(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_une:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    feq.d a0, a0, a1
+; CHECKIZFINXZDINX-NEXT:    xori a0, a0, 1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_une:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -511,6 +595,14 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    xori a0, a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_uno:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    feq.d a1, a1, a1
+; CHECKIZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; CHECKIZFINXZDINX-NEXT:    and a0, a0, a1
+; CHECKIZFINXZDINX-NEXT:    xori a0, a0, 1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_uno:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -541,6 +633,11 @@ define i32 @fcmp_true(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    li a0, 1
 ; CHECKIFD-NEXT:    ret
 ;
+; CHECKIZFINXZDINX-LABEL: fcmp_true:
+; CHECKIZFINXZDINX:       # %bb.0:
+; CHECKIZFINXZDINX-NEXT:    li a0, 1
+; CHECKIZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fcmp_true:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    li a0, 1

diff --git a/llvm/test/CodeGen/RISCV/double-frem.ll b/llvm/test/CodeGen/RISCV/double-frem.ll
index 555793ec291c..1a7bd79763c8 100644
--- a/llvm/test/CodeGen/RISCV/double-frem.ll
+++ b/llvm/test/CodeGen/RISCV/double-frem.ll
@@ -3,6 +3,8 @@
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
 
 define double @frem_f64(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: frem_f64:
@@ -12,6 +14,10 @@ define double @frem_f64(double %a, double %b) nounwind {
 ; RV64IFD-LABEL: frem_f64:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    tail fmod@plt
+;
+; RV64IZFINXZDINX-LABEL: frem_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail fmod@plt
   %1 = frem double %a, %b
   ret double %1
 }

diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll
index d0b0ddd1ca02..62954586eb59 100644
--- a/llvm/test/CodeGen/RISCV/double-imm.ll
+++ b/llvm/test/CodeGen/RISCV/double-imm.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck --check-prefix=CHECKRV64ZDINX %s
 
 define double @double_imm() nounwind {
 ; CHECK-LABEL: double_imm:
@@ -10,6 +12,12 @@ define double @double_imm() nounwind {
 ; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
 ; CHECK-NEXT:    fld fa0, %lo(.LCPI0_0)(a0)
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: double_imm:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    lui a0, %hi(.LCPI0_0)
+; CHECKRV64ZDINX-NEXT:    ld a0, %lo(.LCPI0_0)(a0)
+; CHECKRV64ZDINX-NEXT:    ret
   ret double 3.1415926535897931159979634685441851615905761718750
 }
 
@@ -20,6 +28,13 @@ define double @double_imm_op(double %a) nounwind {
 ; CHECK-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
 ; CHECK-NEXT:    fadd.d fa0, fa0, fa5
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: double_imm_op:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    lui a1, %hi(.LCPI1_0)
+; CHECKRV64ZDINX-NEXT:    ld a1, %lo(.LCPI1_0)(a1)
+; CHECKRV64ZDINX-NEXT:    fadd.d a0, a0, a1
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fadd double %a, 1.0
   ret double %1
 }

diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
index 1ce3f57e9c95..33369da90bae 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll
@@ -5,6 +5,9 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
 ; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64d \
 ; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \
+; RUN:   -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64 \
+; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
 ; RUN:   -verify-machineinstrs -disable-strictnode-mutation \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
@@ -20,6 +23,11 @@ define double @sqrt_f64(double %a) nounwind strictfp {
 ; CHECKIFD-NEXT:    fsqrt.d fa0, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: sqrt_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fsqrt.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: sqrt_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -63,6 +71,16 @@ define double @powi_f64(double %a, i32 %b) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: powi_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    sext.w a1, a1
+; RV64IZFINXZDINX-NEXT:    call __powidf2@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: powi_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -106,6 +124,15 @@ define double @sin_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: sin_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call sin@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: sin_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -148,6 +175,15 @@ define double @cos_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: cos_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call cos@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: cos_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -207,6 +243,24 @@ define double @sincos_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 32
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: sincos_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -32
+; RV64IZFINXZDINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    mv s0, a0
+; RV64IZFINXZDINX-NEXT:    call sin@plt
+; RV64IZFINXZDINX-NEXT:    mv s1, a0
+; RV64IZFINXZDINX-NEXT:    mv a0, s0
+; RV64IZFINXZDINX-NEXT:    call cos@plt
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, s1, a0
+; RV64IZFINXZDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 32
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: sincos_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -282,6 +336,15 @@ define double @pow_f64(double %a, double %b) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: pow_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call pow@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: pow_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -324,6 +387,15 @@ define double @exp_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: exp_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call exp@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: exp_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -366,6 +438,15 @@ define double @exp2_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: exp2_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call exp2@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: exp2_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -408,6 +489,15 @@ define double @log_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: log_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call log@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: log_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -450,6 +540,15 @@ define double @log10_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: log10_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call log10@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: log10_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -492,6 +591,15 @@ define double @log2_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: log2_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call log2@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: log2_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -521,6 +629,11 @@ define double @fma_f64(double %a, double %b, double %c) nounwind strictfp {
 ; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fma_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fma_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -550,6 +663,11 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind strictfp {
 ; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmuladd_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmuladd_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -606,6 +724,15 @@ define double @minnum_f64(double %a, double %b) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: minnum_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call fmin@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: minnum_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -648,6 +775,15 @@ define double @maxnum_f64(double %a, double %b) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: maxnum_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call fmax@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: maxnum_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -707,6 +843,15 @@ define double @floor_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: floor_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call floor@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: floor_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -749,6 +894,15 @@ define double @ceil_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: ceil_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call ceil@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: ceil_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -791,6 +945,15 @@ define double @trunc_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: trunc_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call trunc@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: trunc_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -833,6 +996,15 @@ define double @rint_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: rint_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call rint@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: rint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -875,6 +1047,15 @@ define double @nearbyint_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: nearbyint_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call nearbyint@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: nearbyint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -917,6 +1098,15 @@ define double @round_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: round_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call round@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: round_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -959,6 +1149,15 @@ define double @roundeven_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: roundeven_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    call roundeven@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: roundeven_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -993,6 +1192,11 @@ define iXLen @lrint_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: lrint_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: lrint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1027,6 +1231,11 @@ define iXLen @lround_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: lround_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: lround_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1065,6 +1274,11 @@ define i64 @llrint_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: llrint_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: llrint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1103,6 +1317,11 @@ define i64 @llround_f64(double %a) nounwind strictfp {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: llround_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: llround_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16

diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 38aea142b910..a3056ce891f0 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -5,6 +5,9 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d \
 ; RUN:   | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \
+; RUN:   -verify-machineinstrs -target-abi=lp64 \
+; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
 ; RUN:   -verify-machineinstrs | FileCheck -check-prefix=RV32I %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 \
@@ -18,6 +21,11 @@ define double @sqrt_f64(double %a) nounwind {
 ; CHECKIFD-NEXT:    fsqrt.d fa0, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: sqrt_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fsqrt.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: sqrt_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -56,6 +64,16 @@ define double @powi_f64(double %a, i32 %b) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: powi_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    sext.w a1, a1
+; RV64IZFINXZDINX-NEXT:    call __powidf2@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: powi_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -85,6 +103,10 @@ define double @sin_f64(double %a) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    tail sin@plt
 ;
+; RV64IZFINXZDINX-LABEL: sin_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail sin@plt
+;
 ; RV32I-LABEL: sin_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -113,6 +135,10 @@ define double @cos_f64(double %a) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    tail cos@plt
 ;
+; RV64IZFINXZDINX-LABEL: cos_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail cos@plt
+;
 ; RV32I-LABEL: cos_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -172,6 +198,24 @@ define double @sincos_f64(double %a) nounwind {
 ; RV64IFD-NEXT:    addi sp, sp, 32
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: sincos_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -32
+; RV64IZFINXZDINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    mv s0, a0
+; RV64IZFINXZDINX-NEXT:    call sin@plt
+; RV64IZFINXZDINX-NEXT:    mv s1, a0
+; RV64IZFINXZDINX-NEXT:    mv a0, s0
+; RV64IZFINXZDINX-NEXT:    call cos@plt
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, s1, a0
+; RV64IZFINXZDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 32
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: sincos_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -32
@@ -233,6 +277,10 @@ define double @pow_f64(double %a, double %b) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    tail pow@plt
 ;
+; RV64IZFINXZDINX-LABEL: pow_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail pow@plt
+;
 ; RV32I-LABEL: pow_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -261,6 +309,10 @@ define double @exp_f64(double %a) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    tail exp@plt
 ;
+; RV64IZFINXZDINX-LABEL: exp_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail exp@plt
+;
 ; RV32I-LABEL: exp_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -289,6 +341,10 @@ define double @exp2_f64(double %a) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    tail exp2@plt
 ;
+; RV64IZFINXZDINX-LABEL: exp2_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail exp2@plt
+;
 ; RV32I-LABEL: exp2_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -317,6 +373,10 @@ define double @log_f64(double %a) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    tail log@plt
 ;
+; RV64IZFINXZDINX-LABEL: log_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail log@plt
+;
 ; RV32I-LABEL: log_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -345,6 +405,10 @@ define double @log10_f64(double %a) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    tail log10@plt
 ;
+; RV64IZFINXZDINX-LABEL: log10_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail log10@plt
+;
 ; RV32I-LABEL: log10_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -373,6 +437,10 @@ define double @log2_f64(double %a) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    tail log2@plt
 ;
+; RV64IZFINXZDINX-LABEL: log2_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail log2@plt
+;
 ; RV32I-LABEL: log2_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -402,6 +470,11 @@ define double @fma_f64(double %a, double %b, double %c) nounwind {
 ; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fma_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fma_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -431,6 +504,11 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
 ; CHECKIFD-NEXT:    fmadd.d fa0, fa0, fa1, fa2
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fmuladd_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmadd.d a0, a0, a1, a2
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fmuladd_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -474,6 +552,12 @@ define double @fabs_f64(double %a) nounwind {
 ; CHECKIFD-NEXT:    fabs.d fa0, fa0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: fabs_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    slli a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    srli a0, a0, 1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: fabs_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a1, a1, 1
@@ -497,6 +581,11 @@ define double @minnum_f64(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fmin.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: minnum_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmin.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: minnum_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -526,6 +615,11 @@ define double @maxnum_f64(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fmax.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: maxnum_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fmax.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: maxnum_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -572,6 +666,11 @@ define double @copysign_f64(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fsgnj.d fa0, fa0, fa1
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: copysign_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: copysign_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a2, 524288
@@ -614,6 +713,20 @@ define double @floor_f64(double %a) nounwind {
 ; RV64IFD-NEXT:  .LBB17_2:
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: floor_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI17_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI17_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB17_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rdn
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rdn
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB17_2:
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: floor_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -656,6 +769,20 @@ define double @ceil_f64(double %a) nounwind {
 ; RV64IFD-NEXT:  .LBB18_2:
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: ceil_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI18_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI18_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB18_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rup
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rup
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB18_2:
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: ceil_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -698,6 +825,20 @@ define double @trunc_f64(double %a) nounwind {
 ; RV64IFD-NEXT:  .LBB19_2:
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: trunc_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI19_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI19_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB19_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rtz
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB19_2:
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: trunc_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -740,6 +881,20 @@ define double @rint_f64(double %a) nounwind {
 ; RV64IFD-NEXT:  .LBB20_2:
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: rint_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI20_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI20_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB20_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB20_2:
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: rint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -768,6 +923,10 @@ define double @nearbyint_f64(double %a) nounwind {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    tail nearbyint@plt
 ;
+; RV64IZFINXZDINX-LABEL: nearbyint_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    tail nearbyint@plt
+;
 ; RV32I-LABEL: nearbyint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -810,6 +969,20 @@ define double @round_f64(double %a) nounwind {
 ; RV64IFD-NEXT:  .LBB22_2:
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: round_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI22_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI22_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB22_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rmm
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rmm
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB22_2:
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: round_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -852,6 +1025,20 @@ define double @roundeven_f64(double %a) nounwind {
 ; RV64IFD-NEXT:  .LBB23_2:
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: roundeven_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI23_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI23_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB23_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rne
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rne
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB23_2:
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: roundeven_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -886,6 +1073,11 @@ define iXLen @lrint_f64(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: lrint_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: lrint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -921,6 +1113,11 @@ define iXLen @lround_f64(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: lround_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: lround_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -948,6 +1145,11 @@ define i32 @lround_i32_f64(double %a) nounwind {
 ; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rmm
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: lround_i32_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: lround_i32_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -986,6 +1188,11 @@ define i64 @llrint_f64(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: llrint_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: llrint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1024,6 +1231,11 @@ define i64 @llround_f64(double %a) nounwind {
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: llround_f64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: llround_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
@@ -1054,6 +1266,13 @@ define i1 @isnan_d_fpclass(double %x) {
 ; CHECKIFD-NEXT:    snez a0, a0
 ; CHECKIFD-NEXT:    ret
 ;
+; RV64IZFINXZDINX-LABEL: isnan_d_fpclass:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fclass.d a0, a0
+; RV64IZFINXZDINX-NEXT:    andi a0, a0, 768
+; RV64IZFINXZDINX-NEXT:    snez a0, a0
+; RV64IZFINXZDINX-NEXT:    ret
+;
 ; RV32I-LABEL: isnan_d_fpclass:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slli a1, a1, 1

diff --git a/llvm/test/CodeGen/RISCV/double-isnan.ll b/llvm/test/CodeGen/RISCV/double-isnan.ll
index b0bc37738037..9edf2ce72abe 100644
--- a/llvm/test/CodeGen/RISCV/double-isnan.ll
+++ b/llvm/test/CodeGen/RISCV/double-isnan.ll
@@ -3,6 +3,8 @@
 ; RUN:   < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -target-abi lp64 -verify-machineinstrs \
+; RUN:   < %s | FileCheck --check-prefix=CHECKRV64ZDINX %s
 
 define zeroext i1 @double_is_nan(double %a) nounwind {
 ; CHECK-LABEL: double_is_nan:
@@ -10,6 +12,12 @@ define zeroext i1 @double_is_nan(double %a) nounwind {
 ; CHECK-NEXT:    feq.d a0, fa0, fa0
 ; CHECK-NEXT:    xori a0, a0, 1
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: double_is_nan:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    feq.d a0, a0, a0
+; CHECKRV64ZDINX-NEXT:    xori a0, a0, 1
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp uno double %a, 0.000000e+00
   ret i1 %1
 }
@@ -19,6 +27,11 @@ define zeroext i1 @double_not_nan(double %a) nounwind {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    feq.d a0, fa0, fa0
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: double_not_nan:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    feq.d a0, a0, a0
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ord double %a, 0.000000e+00
   ret i1 %1
 }

diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll
index 7b029de61770..93b03d969da5 100644
--- a/llvm/test/CodeGen/RISCV/double-mem.ll
+++ b/llvm/test/CodeGen/RISCV/double-mem.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s
 
 define dso_local double @fld(ptr %a) nounwind {
 ; CHECKIFD-LABEL: fld:
@@ -11,6 +13,13 @@ define dso_local double @fld(ptr %a) nounwind {
 ; CHECKIFD-NEXT:    fld fa4, 24(a0)
 ; CHECKIFD-NEXT:    fadd.d fa0, fa5, fa4
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fld:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    ld a1, 0(a0)
+; RV64IZFINXZDINX-NEXT:    ld a0, 24(a0)
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:    ret
   %1 = load double, ptr %a
   %2 = getelementptr double, ptr %a, i32 3
   %3 = load double, ptr %2
@@ -27,6 +36,13 @@ define dso_local void @fsd(ptr %a, double %b, double %c) nounwind {
 ; CHECKIFD-NEXT:    fsd fa5, 0(a0)
 ; CHECKIFD-NEXT:    fsd fa5, 64(a0)
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fsd:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a1, a1, a2
+; RV64IZFINXZDINX-NEXT:    sd a1, 0(a0)
+; RV64IZFINXZDINX-NEXT:    sd a1, 64(a0)
+; RV64IZFINXZDINX-NEXT:    ret
 ; Use %b and %c in an FP op to ensure floating point registers are used, even
 ; for the soft float ABI
   %1 = fadd double %b, %c
@@ -50,6 +66,17 @@ define dso_local double @fld_fsd_global(double %a, double %b) nounwind {
 ; CHECKIFD-NEXT:    fld fa5, 72(a1)
 ; CHECKIFD-NEXT:    fsd fa0, 72(a1)
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fld_fsd_global:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(G)
+; RV64IZFINXZDINX-NEXT:    ld a2, %lo(G)(a1)
+; RV64IZFINXZDINX-NEXT:    addi a2, a1, %lo(G)
+; RV64IZFINXZDINX-NEXT:    sd a0, %lo(G)(a1)
+; RV64IZFINXZDINX-NEXT:    ld a1, 72(a2)
+; RV64IZFINXZDINX-NEXT:    sd a0, 72(a2)
+; RV64IZFINXZDINX-NEXT:    ret
 ; Use %a and %b in an FP op to ensure floating point registers are used, even
 ; for the soft float ABI
   %1 = fadd double %a, %b
@@ -79,6 +106,15 @@ define dso_local double @fld_fsd_constant(double %a) nounwind {
 ; RV64IFD-NEXT:    fadd.d fa0, fa0, fa5
 ; RV64IFD-NEXT:    fsd fa0, -273(a0)
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fld_fsd_constant:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, 228023
+; RV64IZFINXZDINX-NEXT:    slli a1, a1, 2
+; RV64IZFINXZDINX-NEXT:    ld a2, -273(a1)
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a2
+; RV64IZFINXZDINX-NEXT:    sd a0, -273(a1)
+; RV64IZFINXZDINX-NEXT:    ret
   %1 = inttoptr i32 3735928559 to ptr
   %2 = load volatile double, ptr %1
   %3 = fadd double %a, %2
@@ -118,6 +154,21 @@ define dso_local double @fld_stack(double %a) nounwind {
 ; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 32
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fld_stack:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -32
+; RV64IZFINXZDINX-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    mv s0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, sp, 8
+; RV64IZFINXZDINX-NEXT:    call notdead@plt
+; RV64IZFINXZDINX-NEXT:    ld a0, 8(sp)
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, s0
+; RV64IZFINXZDINX-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 32
+; RV64IZFINXZDINX-NEXT:    ret
   %1 = alloca double, align 8
   call void @notdead(ptr %1)
   %2 = load double, ptr %1
@@ -149,6 +200,18 @@ define dso_local void @fsd_stack(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fsd_stack:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    sd a0, 0(sp)
+; RV64IZFINXZDINX-NEXT:    mv a0, sp
+; RV64IZFINXZDINX-NEXT:    call notdead@plt
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
   %1 = fadd double %a, %b ; force store from FPR64
   %2 = alloca double, align 8
   store double %1, ptr %2
@@ -163,6 +226,12 @@ define dso_local void @fsd_trunc(ptr %a, double %b) nounwind noinline optnone {
 ; CHECKIFD-NEXT:    fcvt.s.d fa5, fa0
 ; CHECKIFD-NEXT:    fsw fa5, 0(a0)
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: fsd_trunc:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.s.d a1, a1
+; RV64IZFINXZDINX-NEXT:    sw a1, 0(a0)
+; RV64IZFINXZDINX-NEXT:    ret
   %1 = fptrunc double %b to float
   store float %1, ptr %a, align 4
   ret void

diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
index 03e646b7ac17..caf857588f6d 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s
 
 define signext i32 @test_floor_si32(double %x) {
 ; CHECKIFD-LABEL: test_floor_si32:
@@ -13,6 +15,15 @@ define signext i32 @test_floor_si32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rdn
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
   ret i32 %b
@@ -66,6 +77,15 @@ define i64 @test_floor_si64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rdn
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
   ret i64 %b
@@ -80,6 +100,15 @@ define signext i32 @test_floor_ui32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rdn
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
   ret i32 %b
@@ -119,6 +148,15 @@ define i64 @test_floor_ui64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rdn
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
   ret i64 %b
@@ -133,6 +171,15 @@ define signext i32 @test_ceil_si32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rup
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
   ret i32 %b
@@ -186,6 +233,15 @@ define i64 @test_ceil_si64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rup
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
   ret i64 %b
@@ -200,6 +256,15 @@ define signext i32 @test_ceil_ui32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rup
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
   ret i32 %b
@@ -239,6 +304,15 @@ define i64 @test_ceil_ui64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rup
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
   ret i64 %b
@@ -253,6 +327,15 @@ define signext i32 @test_trunc_si32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
   ret i32 %b
@@ -306,6 +389,15 @@ define i64 @test_trunc_si64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
   ret i64 %b
@@ -320,6 +412,15 @@ define signext i32 @test_trunc_ui32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
   ret i32 %b
@@ -359,6 +460,15 @@ define i64 @test_trunc_ui64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
   ret i64 %b
@@ -373,6 +483,15 @@ define signext i32 @test_round_si32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rmm
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
   ret i32 %b
@@ -426,6 +545,15 @@ define i64 @test_round_si64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rmm
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
   ret i64 %b
@@ -440,6 +568,15 @@ define signext i32 @test_round_ui32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rmm
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
   ret i32 %b
@@ -479,6 +616,15 @@ define i64 @test_round_ui64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rmm
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
   ret i64 %b
@@ -493,6 +639,15 @@ define signext i32 @test_roundeven_si32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rne
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
   ret i32 %b
@@ -546,6 +701,15 @@ define i64 @test_roundeven_si64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rne
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
   ret i64 %b
@@ -560,6 +724,15 @@ define signext i32 @test_roundeven_ui32(double %x) {
 ; CHECKIFD-NEXT:    addi a1, a1, -1
 ; CHECKIFD-NEXT:    and a0, a1, a0
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rne
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
   ret i32 %b
@@ -599,6 +772,15 @@ define i64 @test_roundeven_ui64(double %x) nounwind {
 ; RV64IFD-NEXT:    addi a1, a1, -1
 ; RV64IFD-NEXT:    and a0, a1, a0
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rne
+; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
+; RV64IZFINXZDINX-NEXT:    seqz a0, a0
+; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
+; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
   ret i64 %b

diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
index c67b94a972bd..97bcd0016674 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s
 
 define signext i8 @test_floor_si8(double %x) {
 ; RV32IFD-LABEL: test_floor_si8:
@@ -14,6 +16,11 @@ define signext i8 @test_floor_si8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_si8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rdn
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = fptosi double %a to i8
   ret i8 %b
@@ -29,6 +36,11 @@ define signext i16 @test_floor_si16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_si16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rdn
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = fptosi double %a to i16
   ret i16 %b
@@ -39,6 +51,11 @@ define signext i32 @test_floor_si32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rdn
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rdn
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = fptosi double %a to i32
   ret i32 %b
@@ -61,6 +78,11 @@ define i64 @test_floor_si64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rdn
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = fptosi double %a to i64
   ret i64 %b
@@ -76,6 +98,11 @@ define zeroext i8 @test_floor_ui8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_ui8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rdn
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = fptoui double %a to i8
   ret i8 %b
@@ -91,6 +118,11 @@ define zeroext i16 @test_floor_ui16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_ui16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rdn
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = fptoui double %a to i16
   ret i16 %b
@@ -101,6 +133,11 @@ define signext i32 @test_floor_ui32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rdn
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rdn
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = fptoui double %a to i32
   ret i32 %b
@@ -123,6 +160,11 @@ define i64 @test_floor_ui64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rdn
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   %b = fptoui double %a to i64
   ret i64 %b
@@ -138,6 +180,11 @@ define signext i8 @test_ceil_si8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_si8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rup
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = fptosi double %a to i8
   ret i8 %b
@@ -153,6 +200,11 @@ define signext i16 @test_ceil_si16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_si16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rup
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = fptosi double %a to i16
   ret i16 %b
@@ -163,6 +215,11 @@ define signext i32 @test_ceil_si32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rup
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rup
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = fptosi double %a to i32
   ret i32 %b
@@ -185,6 +242,11 @@ define i64 @test_ceil_si64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rup
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = fptosi double %a to i64
   ret i64 %b
@@ -200,6 +262,11 @@ define zeroext i8 @test_ceil_ui8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_ui8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rup
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = fptoui double %a to i8
   ret i8 %b
@@ -215,6 +282,11 @@ define zeroext i16 @test_ceil_ui16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_ui16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rup
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = fptoui double %a to i16
   ret i16 %b
@@ -225,6 +297,11 @@ define signext i32 @test_ceil_ui32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rup
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rup
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = fptoui double %a to i32
   ret i32 %b
@@ -247,6 +324,11 @@ define i64 @test_ceil_ui64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rup
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   %b = fptoui double %a to i64
   ret i64 %b
@@ -262,6 +344,11 @@ define signext i8 @test_trunc_si8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_si8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = fptosi double %a to i8
   ret i8 %b
@@ -277,6 +364,11 @@ define signext i16 @test_trunc_si16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_si16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = fptosi double %a to i16
   ret i16 %b
@@ -287,6 +379,11 @@ define signext i32 @test_trunc_si32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rtz
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = fptosi double %a to i32
   ret i32 %b
@@ -309,6 +406,11 @@ define i64 @test_trunc_si64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = fptosi double %a to i64
   ret i64 %b
@@ -324,6 +426,11 @@ define zeroext i8 @test_trunc_ui8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_ui8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = fptoui double %a to i8
   ret i8 %b
@@ -339,6 +446,11 @@ define zeroext i16 @test_trunc_ui16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_ui16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = fptoui double %a to i16
   ret i16 %b
@@ -349,6 +461,11 @@ define signext i32 @test_trunc_ui32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = fptoui double %a to i32
   ret i32 %b
@@ -371,6 +488,11 @@ define i64 @test_trunc_ui64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   %b = fptoui double %a to i64
   ret i64 %b
@@ -386,6 +508,11 @@ define signext i8 @test_round_si8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_si8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = fptosi double %a to i8
   ret i8 %b
@@ -401,6 +528,11 @@ define signext i16 @test_round_si16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_si16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = fptosi double %a to i16
   ret i16 %b
@@ -411,6 +543,11 @@ define signext i32 @test_round_si32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rmm
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = fptosi double %a to i32
   ret i32 %b
@@ -433,6 +570,11 @@ define i64 @test_round_si64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = fptosi double %a to i64
   ret i64 %b
@@ -448,6 +590,11 @@ define zeroext i8 @test_round_ui8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_ui8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = fptoui double %a to i8
   ret i8 %b
@@ -463,6 +610,11 @@ define zeroext i16 @test_round_ui16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_ui16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = fptoui double %a to i16
   ret i16 %b
@@ -473,6 +625,11 @@ define signext i32 @test_round_ui32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rmm
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = fptoui double %a to i32
   ret i32 %b
@@ -495,6 +652,11 @@ define i64 @test_round_ui64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rmm
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   %b = fptoui double %a to i64
   ret i64 %b
@@ -510,6 +672,11 @@ define signext i8 @test_roundeven_si8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_si8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rne
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = fptosi double %a to i8
   ret i8 %b
@@ -525,6 +692,11 @@ define signext i16 @test_roundeven_si16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_si16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rne
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = fptosi double %a to i16
   ret i16 %b
@@ -535,6 +707,11 @@ define signext i32 @test_roundeven_si32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rne
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.w.d a0, a0, rne
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = fptosi double %a to i32
   ret i32 %b
@@ -557,6 +734,11 @@ define i64 @test_roundeven_si64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a0, a0, rne
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = fptosi double %a to i64
   ret i64 %b
@@ -572,6 +754,11 @@ define zeroext i8 @test_roundeven_ui8(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_ui8:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rne
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = fptoui double %a to i8
   ret i8 %b
@@ -587,6 +774,11 @@ define zeroext i16 @test_roundeven_ui16(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_ui16:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rne
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = fptoui double %a to i16
   ret i16 %b
@@ -597,6 +789,11 @@ define signext i32 @test_roundeven_ui32(double %x) {
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rne
 ; CHECKIFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a0, a0, rne
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = fptoui double %a to i32
   ret i32 %b
@@ -619,6 +816,11 @@ define i64 @test_roundeven_ui64(double %x) {
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_ui64:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a0, a0, rne
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   %b = fptoui double %a to i64
   ret i64 %b
@@ -642,6 +844,20 @@ define double @test_floor_double(double %x) {
 ; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB40_2:
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_floor_double:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI40_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI40_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB40_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rdn
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rdn
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB40_2:
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.floor.f64(double %x)
   ret double %a
 }
@@ -664,6 +880,20 @@ define double @test_ceil_double(double %x) {
 ; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB41_2:
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_ceil_double:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI41_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI41_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB41_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rup
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rup
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB41_2:
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.ceil.f64(double %x)
   ret double %a
 }
@@ -686,6 +916,20 @@ define double @test_trunc_double(double %x) {
 ; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB42_2:
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_trunc_double:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI42_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI42_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB42_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rtz
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rtz
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB42_2:
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.trunc.f64(double %x)
   ret double %a
 }
@@ -708,6 +952,20 @@ define double @test_round_double(double %x) {
 ; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB43_2:
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_round_double:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI43_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI43_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB43_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rmm
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rmm
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB43_2:
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.round.f64(double %x)
   ret double %a
 }
@@ -730,6 +988,20 @@ define double @test_roundeven_double(double %x) {
 ; RV64IFD-NEXT:    fsgnj.d fa0, fa5, fa0
 ; RV64IFD-NEXT:  .LBB44_2:
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: test_roundeven_double:
+; RV64IZFINXZDINX:       # %bb.0:
+; RV64IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI44_0)
+; RV64IZFINXZDINX-NEXT:    ld a1, %lo(.LCPI44_0)(a1)
+; RV64IZFINXZDINX-NEXT:    fabs.d a2, a0
+; RV64IZFINXZDINX-NEXT:    flt.d a1, a2, a1
+; RV64IZFINXZDINX-NEXT:    beqz a1, .LBB44_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1:
+; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rne
+; RV64IZFINXZDINX-NEXT:    fcvt.d.l a1, a1, rne
+; RV64IZFINXZDINX-NEXT:    fsgnj.d a0, a1, a0
+; RV64IZFINXZDINX-NEXT:  .LBB44_2:
+; RV64IZFINXZDINX-NEXT:    ret
   %a = call double @llvm.roundeven.f64(double %x)
   ret double %a
 }
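
For readers who want to reproduce one of the RV64IZFINXZDINX check blocks above by hand, an input of the following shape is enough; this is a reduced sketch, and the function name, file handling and exact llc invocation are illustrative rather than taken from the patch:

; Reduced sketch of the test_round_si64 pattern; run with something like:
;   llc -mtriple=riscv64 -mattr=+zdinx -target-abi=lp64 -verify-machineinstrs -o -
declare double @llvm.round.f64(double)

define i64 @round_to_i64(double %x) {
  ; With Zdinx the double argument is already in a0, so on RV64 this should
  ; lower to a single "fcvt.l.d a0, a0, rmm" followed by "ret", as checked above.
  %r = call double @llvm.round.f64(double %x)
  %i = fptosi double %r to i64
  ret i64 %i
}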

diff  --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
index b3cb0e8fb506..f89b6aa21c1e 100644
--- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
@@ -3,12 +3,19 @@
 ; RUN:   -target-abi=ilp32d | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck --check-prefix=CHECKRV64ZDINX %s
 
 define double @select_fcmp_false(double %a, double %b) nounwind {
 ; CHECK-LABEL: select_fcmp_false:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_false:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp false double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -23,6 +30,15 @@ define double @select_fcmp_oeq(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB1_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_oeq:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    feq.d a2, a0, a1
+; CHECKRV64ZDINX-NEXT:    bnez a2, .LBB1_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB1_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -37,6 +53,15 @@ define double @select_fcmp_ogt(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB2_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_ogt:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    flt.d a2, a1, a0
+; CHECKRV64ZDINX-NEXT:    bnez a2, .LBB2_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB2_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ogt double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -51,6 +76,15 @@ define double @select_fcmp_oge(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB3_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_oge:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    fle.d a2, a1, a0
+; CHECKRV64ZDINX-NEXT:    bnez a2, .LBB3_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB3_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp oge double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -65,6 +99,15 @@ define double @select_fcmp_olt(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB4_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_olt:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    flt.d a2, a0, a1
+; CHECKRV64ZDINX-NEXT:    bnez a2, .LBB4_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB4_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -79,6 +122,15 @@ define double @select_fcmp_ole(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB5_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_ole:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    fle.d a2, a0, a1
+; CHECKRV64ZDINX-NEXT:    bnez a2, .LBB5_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB5_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -95,6 +147,17 @@ define double @select_fcmp_one(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB6_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_one:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    flt.d a2, a0, a1
+; CHECKRV64ZDINX-NEXT:    flt.d a3, a1, a0
+; CHECKRV64ZDINX-NEXT:    or a2, a3, a2
+; CHECKRV64ZDINX-NEXT:    bnez a2, .LBB6_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB6_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp one double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -111,6 +174,17 @@ define double @select_fcmp_ord(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB7_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_ord:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    feq.d a2, a1, a1
+; CHECKRV64ZDINX-NEXT:    feq.d a3, a0, a0
+; CHECKRV64ZDINX-NEXT:    and a2, a3, a2
+; CHECKRV64ZDINX-NEXT:    bnez a2, .LBB7_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB7_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ord double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -127,6 +201,17 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB8_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_ueq:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    flt.d a2, a0, a1
+; CHECKRV64ZDINX-NEXT:    flt.d a3, a1, a0
+; CHECKRV64ZDINX-NEXT:    or a2, a3, a2
+; CHECKRV64ZDINX-NEXT:    beqz a2, .LBB8_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB8_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ueq double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -141,6 +226,15 @@ define double @select_fcmp_ugt(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB9_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_ugt:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    fle.d a2, a0, a1
+; CHECKRV64ZDINX-NEXT:    beqz a2, .LBB9_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB9_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ugt double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -155,6 +249,15 @@ define double @select_fcmp_uge(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB10_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_uge:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    flt.d a2, a0, a1
+; CHECKRV64ZDINX-NEXT:    beqz a2, .LBB10_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB10_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp uge double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -169,6 +272,15 @@ define double @select_fcmp_ult(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB11_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_ult:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    fle.d a2, a1, a0
+; CHECKRV64ZDINX-NEXT:    beqz a2, .LBB11_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB11_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ult double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -183,6 +295,15 @@ define double @select_fcmp_ule(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB12_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_ule:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    flt.d a2, a1, a0
+; CHECKRV64ZDINX-NEXT:    beqz a2, .LBB12_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB12_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ule double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -197,6 +318,15 @@ define double @select_fcmp_une(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB13_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_une:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    feq.d a2, a0, a1
+; CHECKRV64ZDINX-NEXT:    beqz a2, .LBB13_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB13_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp une double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -213,6 +343,17 @@ define double @select_fcmp_uno(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB14_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_uno:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    feq.d a2, a1, a1
+; CHECKRV64ZDINX-NEXT:    feq.d a3, a0, a0
+; CHECKRV64ZDINX-NEXT:    and a2, a3, a2
+; CHECKRV64ZDINX-NEXT:    beqz a2, .LBB14_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a1
+; CHECKRV64ZDINX-NEXT:  .LBB14_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp uno double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -222,6 +363,10 @@ define double @select_fcmp_true(double %a, double %b) nounwind {
 ; CHECK-LABEL: select_fcmp_true:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_true:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp true double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -237,6 +382,16 @@ define i32 @i32_select_fcmp_oeq(double %a, double %b, i32 %c, i32 %d) nounwind {
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:  .LBB16_2:
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: i32_select_fcmp_oeq:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    feq.d a1, a0, a1
+; CHECKRV64ZDINX-NEXT:    mv a0, a2
+; CHECKRV64ZDINX-NEXT:    bnez a1, .LBB16_2
+; CHECKRV64ZDINX-NEXT:  # %bb.1:
+; CHECKRV64ZDINX-NEXT:    mv a0, a3
+; CHECKRV64ZDINX-NEXT:  .LBB16_2:
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = select i1 %1, i32 %c, i32 %d
   ret i32 %2
@@ -249,6 +404,13 @@ define i32 @select_fcmp_oeq_1_2(double %a, double %b) {
 ; CHECK-NEXT:    li a1, 2
 ; CHECK-NEXT:    sub a0, a1, a0
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_oeq_1_2:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    feq.d a0, a0, a1
+; CHECKRV64ZDINX-NEXT:    li a1, 2
+; CHECKRV64ZDINX-NEXT:    sub a0, a1, a0
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp fast oeq double %a, %b
   %2 = select i1 %1, i32 1, i32 2
   ret i32 %2
@@ -260,6 +422,12 @@ define signext i32 @select_fcmp_uge_negone_zero(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fle.d a0, fa0, fa1
 ; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_uge_negone_zero:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    fle.d a0, a0, a1
+; CHECKRV64ZDINX-NEXT:    addi a0, a0, -1
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ugt double %a, %b
   %2 = select i1 %1, i32 -1, i32 0
   ret i32 %2
@@ -271,6 +439,12 @@ define signext i32 @select_fcmp_uge_1_2(double %a, double %b) nounwind {
 ; CHECK-NEXT:    fle.d a0, fa0, fa1
 ; CHECK-NEXT:    addi a0, a0, 1
 ; CHECK-NEXT:    ret
+;
+; CHECKRV64ZDINX-LABEL: select_fcmp_uge_1_2:
+; CHECKRV64ZDINX:       # %bb.0:
+; CHECKRV64ZDINX-NEXT:    fle.d a0, a0, a1
+; CHECKRV64ZDINX-NEXT:    addi a0, a0, 1
+; CHECKRV64ZDINX-NEXT:    ret
   %1 = fcmp ugt double %a, %b
   %2 = select i1 %1, i32 1, i32 2
   ret i32 %2
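
The CHECKRV64ZDINX bodies above all follow the same scheme: under Zdinx the f64 operands and the i1 compare result live in X registers, so a floating-point select lowers to a compare, a branch, and a plain mv, with no fmv.d involved. A reduced input of that shape is sketched below; the function name is illustrative and the expected assembly is only restated from the select_fcmp_olt check above:

; llc -mtriple=riscv64 -mattr=+zdinx -target-abi=lp64 -verify-machineinstrs
define double @select_smaller(double %a, double %b) nounwind {
  ; Expected shape, matching select_fcmp_olt above:
  ;   flt.d a2, a0, a1 ; bnez a2, <tail> ; mv a0, a1 ; ret
  %cmp = fcmp olt double %a, %b
  %sel = select i1 %cmp, double %a, double %b
  ret double %sel
}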

diff  --git a/llvm/test/CodeGen/RISCV/double-select-icmp.ll b/llvm/test/CodeGen/RISCV/double-select-icmp.ll
index 429b791baf19..f29eaa5985b2 100644
--- a/llvm/test/CodeGen/RISCV/double-select-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-icmp.ll
@@ -4,6 +4,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   -target-abi=lp64 | FileCheck -check-prefixes=RV64ZDINX %s
 
 define double @select_icmp_eq(i32 signext %a, i32 signext %b, double %c, double %d) {
 ; CHECK-LABEL: select_icmp_eq:
@@ -13,6 +15,15 @@ define double @select_icmp_eq(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB0_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_eq:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    beq a0, a1, .LBB0_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB0_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp eq i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -26,6 +37,15 @@ define double @select_icmp_ne(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB1_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_ne:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    bne a0, a1, .LBB1_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB1_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp ne i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -39,6 +59,15 @@ define double @select_icmp_ugt(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB2_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_ugt:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    bltu a1, a0, .LBB2_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB2_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp ugt i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -52,6 +81,15 @@ define double @select_icmp_uge(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB3_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_uge:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    bgeu a0, a1, .LBB3_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB3_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp uge i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -65,6 +103,15 @@ define double @select_icmp_ult(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB4_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_ult:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    bltu a0, a1, .LBB4_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB4_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp ult i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -78,6 +125,15 @@ define double @select_icmp_ule(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB5_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_ule:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    bgeu a1, a0, .LBB5_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB5_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp ule i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -91,6 +147,15 @@ define double @select_icmp_sgt(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB6_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_sgt:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    blt a1, a0, .LBB6_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB6_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp sgt i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -104,6 +169,15 @@ define double @select_icmp_sge(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB7_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_sge:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    bge a0, a1, .LBB7_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB7_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp sge i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -117,6 +191,15 @@ define double @select_icmp_slt(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB8_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_slt:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    blt a0, a1, .LBB8_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB8_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp slt i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2
@@ -130,6 +213,15 @@ define double @select_icmp_sle(i32 signext %a, i32 signext %b, double %c, double
 ; CHECK-NEXT:    fmv.d fa0, fa1
 ; CHECK-NEXT:  .LBB9_2:
 ; CHECK-NEXT:    ret
+;
+; RV64ZDINX-LABEL: select_icmp_sle:
+; RV64ZDINX:       # %bb.0:
+; RV64ZDINX-NEXT:    bge a1, a0, .LBB9_2
+; RV64ZDINX-NEXT:  # %bb.1:
+; RV64ZDINX-NEXT:    mv a2, a3
+; RV64ZDINX-NEXT:  .LBB9_2:
+; RV64ZDINX-NEXT:    mv a0, a2
+; RV64ZDINX-NEXT:    ret
   %1 = icmp sle i32 %a, %b
   %2 = select i1 %1, double %c, double %d
   ret double %2

diff  --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
index 95438fda9a70..0ab98a75e9db 100644
--- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -3,6 +3,8 @@
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
 
 define double @func(double %d, i32 %n) nounwind {
 ; RV32IFD-LABEL: func:
@@ -55,6 +57,28 @@ define double @func(double %d, i32 %n) nounwind {
 ; RV64IFD-NEXT:  .LBB0_2: # %return
 ; RV64IFD-NEXT:    fmv.x.d a0, fa5
 ; RV64IFD-NEXT:    ret
+;
+; RV64IZFINXZDINX-LABEL: func:
+; RV64IZFINXZDINX:       # %bb.0: # %entry
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, -16
+; RV64IZFINXZDINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IZFINXZDINX-NEXT:    sext.w a2, a1
+; RV64IZFINXZDINX-NEXT:    mv s0, a0
+; RV64IZFINXZDINX-NEXT:    beqz a2, .LBB0_2
+; RV64IZFINXZDINX-NEXT:  # %bb.1: # %if.else
+; RV64IZFINXZDINX-NEXT:    addiw a1, a1, -1
+; RV64IZFINXZDINX-NEXT:    mv a0, s0
+; RV64IZFINXZDINX-NEXT:    call func@plt
+; RV64IZFINXZDINX-NEXT:    fadd.d a0, a0, s0
+; RV64IZFINXZDINX-NEXT:    j .LBB0_3
+; RV64IZFINXZDINX-NEXT:  .LBB0_2: # %return
+; RV64IZFINXZDINX-NEXT:    mv a0, s0
+; RV64IZFINXZDINX-NEXT:  .LBB0_3: # %return
+; RV64IZFINXZDINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IZFINXZDINX-NEXT:    addi sp, sp, 16
+; RV64IZFINXZDINX-NEXT:    ret
 entry:
   %cmp = icmp eq i32 %n, 0
   br i1 %cmp, label %return, label %if.else
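; (The rest of the IR is elided by the diff context.) The interesting difference
; from RV64IFD in the checks above is that the incoming double never leaves the
; X register file: it is kept live across the recursive call in s0, an ordinary
; callee-saved GPR, and fed straight into fadd.d, with no fmv.x.d/fmv.d.x pair.
; A rough sketch of the recursive shape, reconstructed from the assembly above
; rather than copied from the test input, would be:
define double @func_sketch(double %d, i32 %n) nounwind {
entry:
  %cmp = icmp eq i32 %n, 0
  br i1 %cmp, label %return, label %if.else

if.else:                                          ; recurse, then add %d
  %dec = add i32 %n, -1
  %call = call double @func_sketch(double %d, i32 %dec)
  %add = fadd double %call, %d
  br label %return

return:
  %ret = phi double [ %d, %entry ], [ %add, %if.else ]
  ret double %ret
}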


        

