[llvm] r352833 - [RISCV] Implement RV64D codegen

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 31 19:53:30 PST 2019


Author: asb
Date: Thu Jan 31 19:53:30 2019
New Revision: 352833

URL: http://llvm.org/viewvc/llvm-project?rev=352833&view=rev
Log:
[RISCV] Implement RV64D codegen

This patch:
* Adds necessary RV64D codegen patterns
* Modifies CC_RISCV so it correctly handles f64 types under the soft-float ABI, as illustrated below
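
For example, under the RV64 soft-float ABI an f64 argument is passed in a
GPR as an i64 and moved into an FPR with fmv.d.x, as the updated
double-arith.ll test shows:

  define double @fadd_d(double %a, double %b) nounwind {
    %1 = fadd double %a, %b
    ret double %1
  }

  fadd_d:
    fmv.d.x ft0, a1
    fmv.d.x ft1, a0
    fadd.d ft0, ft1, ft0
    fmv.x.d a0, ft0
    ret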

Note that in general there is no reason to try to select fcvt.w[u].d rather than fcvt.l[u].d for i32 conversions, because fptosi/fptoui produce poison if the value won't fit in the target type.
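
For example, an i32 fptosi is selected to fcvt.l.d rather than fcvt.w.d, as
the updated double-convert.ll test shows:

  define i32 @fcvt_w_d(double %a) nounwind {
    %1 = fptosi double %a to i32
    ret i32 %1
  }

  fcvt_w_d:
    fmv.d.x ft0, a0
    fcvt.l.d a0, ft0, rtz
    ret

The one exception is a fptoui whose i32 result is then sign-extended:
fcvt.wu.d sign-extends its result, so selecting it avoids an
fcvt.lu.d+sext.w pair.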

Differential Revision: https://reviews.llvm.org/D53237


Added:
    llvm/trunk/test/CodeGen/RISCV/rv64d-double-convert.ll
Modified:
    llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td
    llvm/trunk/test/CodeGen/RISCV/double-arith.ll
    llvm/trunk/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
    llvm/trunk/test/CodeGen/RISCV/double-br-fcmp.ll
    llvm/trunk/test/CodeGen/RISCV/double-convert.ll
    llvm/trunk/test/CodeGen/RISCV/double-fcmp.ll
    llvm/trunk/test/CodeGen/RISCV/double-imm.ll
    llvm/trunk/test/CodeGen/RISCV/double-intrinsics.ll
    llvm/trunk/test/CodeGen/RISCV/double-mem.ll
    llvm/trunk/test/CodeGen/RISCV/double-select-fcmp.ll
    llvm/trunk/test/CodeGen/RISCV/double-stack-spill-restore.ll

Modified: llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp Thu Jan 31 19:53:30 2019
@@ -935,6 +935,10 @@ static bool CC_RISCV(const DataLayout &D
     LocVT = XLenVT;
     LocInfo = CCValAssign::BCvt;
   }
+  if (XLen == 64 && ValVT == MVT::f64) {
+    LocVT = MVT::i64;
+    LocInfo = CCValAssign::BCvt;
+  }
 
  // Any return value split into more than two values can't be returned
   // directly.
@@ -1042,8 +1046,9 @@ static bool CC_RISCV(const DataLayout &D
     return false;
   }
 
-  if (ValVT == MVT::f32) {
-    LocVT = MVT::f32;
+  // When an f32 or f64 is passed on the stack, no bit-conversion is needed.
+  if (ValVT == MVT::f32 || ValVT == MVT::f64) {
+    LocVT = ValVT;
     LocInfo = CCValAssign::Full;
   }
   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
@@ -1178,8 +1183,6 @@ static SDValue unpackFromMemLoc(Selectio
     ExtType = ISD::NON_EXTLOAD;
     break;
   }
-  if (ValVT == MVT::f32)
-    LocVT = MVT::f32;
   Val = DAG.getExtLoad(
       ExtType, DL, LocVT, Chain, FIN,
       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);

Modified: llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td Thu Jan 31 19:53:30 2019
@@ -307,3 +307,26 @@ def : Pat<(fp_to_uint FPR64:$rs1), (FCVT
 def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_W GPR:$rs1)>;
 def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_WU GPR:$rs1)>;
 } // Predicates = [HasStdExtD, IsRV32]
+
+let Predicates = [HasStdExtD, IsRV64] in {
+def : Pat<(bitconvert GPR:$rs1), (FMV_D_X GPR:$rs1)>;
+def : Pat<(bitconvert FPR64:$rs1), (FMV_X_D FPR64:$rs1)>;
+
+// FP->[u]int32 is mostly handled by the FP->[u]int64 patterns. This is safe
+// because fpto[u|s]i produces poison if the value can't fit into the target
+// type. We match the single case below because fcvt.wu.d sign-extends its
+// result, and so is cheaper than fcvt.lu.d+sext.w.
+def : Pat<(sext_inreg (zexti32 (fp_to_uint FPR64:$rs1)), i32),
+          (FCVT_WU_D $rs1, 0b001)>;
+
+// [u]int32->fp
+def : Pat<(sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_D_W $rs1)>;
+def : Pat<(uint_to_fp (zexti32 GPR:$rs1)), (FCVT_D_WU $rs1)>;
+
+def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_L_D FPR64:$rs1, 0b001)>;
+def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_LU_D FPR64:$rs1, 0b001)>;
+
+// [u]int64->fp. Match GCC and default to using dynamic rounding mode.
+def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_L GPR:$rs1, 0b111)>;
+def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_LU GPR:$rs1, 0b111)>;
+} // Predicates = [HasStdExtD, IsRV64]

Modified: llvm/trunk/test/CodeGen/RISCV/double-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-arith.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-arith.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-arith.ll Thu Jan 31 19:53:30 2019
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 ; These tests are each targeted at a particular RISC-V FPU instruction. Most
 ; other files in this folder exercise LLVM IR instructions that don't directly
@@ -22,6 +24,14 @@ define double @fadd_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fadd_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %b
   ret double %1
 }
@@ -42,6 +52,14 @@ define double @fsub_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsub_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsub.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fsub double %a, %b
   ret double %1
 }
@@ -62,6 +80,14 @@ define double @fmul_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmul_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmul.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fmul double %a, %b
   ret double %1
 }
@@ -82,6 +108,14 @@ define double @fdiv_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fdiv_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fdiv.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fdiv double %a, %b
   ret double %1
 }
@@ -101,6 +135,13 @@ define double @fsqrt_d(double %a) nounwi
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsqrt_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fsqrt.d ft0, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.sqrt.f64(double %a)
   ret double %1
 }
@@ -123,6 +164,14 @@ define double @fsgnj_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsgnj_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
   ret double %1
 }
@@ -141,6 +190,14 @@ define i32 @fneg_d(double %a, double %b)
 ; RV32IFD-NEXT:    feq.d a0, ft0, ft1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fneg_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft0
+; RV64IFD-NEXT:    fneg.d ft1, ft0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft1
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %a
   %2 = fneg double %1
   %3 = fcmp oeq double %1, %2
@@ -149,6 +206,9 @@ define i32 @fneg_d(double %a, double %b)
 }
 
 define double @fsgnjn_d(double %a, double %b) nounwind {
+; TODO: fsgnjn.d isn't selected on RV64 because DAGCombiner::visitBITCAST will
+; convert (bitconvert (fneg x)) to a xor.
+;
 ; RV32IFD-LABEL: fsgnjn_d:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -164,6 +224,17 @@ define double @fsgnjn_d(double %a, doubl
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsgnjn_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a2, zero, -1
+; RV64IFD-NEXT:    slli a2, a2, 63
+; RV64IFD-NEXT:    xor a1, a1, a2
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fsub double -0.0, %b
   %2 = call double @llvm.copysign.f64(double %a, double %1)
   ret double %2
@@ -191,6 +262,16 @@ define double @fabs_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fabs_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fabs.d ft1, ft0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %b
   %2 = call double @llvm.fabs.f64(double %1)
   %3 = fadd double %2, %1
@@ -215,6 +296,14 @@ define double @fmin_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmin_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.minnum.f64(double %a, double %b)
   ret double %1
 }
@@ -237,6 +326,14 @@ define double @fmax_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmax_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmax.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.maxnum.f64(double %a, double %b)
   ret double %1
 }
@@ -254,6 +351,13 @@ define i32 @feq_d(double %a, double %b)
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: feq_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -272,6 +376,13 @@ define i32 @flt_d(double %a, double %b)
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: flt_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -290,6 +401,13 @@ define i32 @fle_d(double %a, double %b)
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fle_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -316,6 +434,15 @@ define double @fmadd_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmadd_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
   ret double %1
 }
@@ -343,6 +470,19 @@ define double @fmsub_d(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmsub_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    lui a2, %hi(.LCPI15_0)
+; RV64IFD-NEXT:    addi a2, a2, %lo(.LCPI15_0)
+; RV64IFD-NEXT:    fld ft1, 0(a2)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fmsub.d ft0, ft2, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %c_ = fadd double 0.0, %c ; avoid negation using xor
   %negc = fsub double -0.0, %c_
   %1 = call double @llvm.fma.f64(double %a, double %b, double %negc)
@@ -373,6 +513,20 @@ define double @fnmadd_d(double %a, doubl
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fnmadd_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    lui a2, %hi(.LCPI16_0)
+; RV64IFD-NEXT:    addi a2, a2, %lo(.LCPI16_0)
+; RV64IFD-NEXT:    fld ft1, 0(a2)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fadd.d ft1, ft2, ft1
+; RV64IFD-NEXT:    fmv.d.x ft2, a1
+; RV64IFD-NEXT:    fnmadd.d ft0, ft1, ft2, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %a_ = fadd double 0.0, %a
   %c_ = fadd double 0.0, %c
   %nega = fsub double -0.0, %a_
@@ -404,6 +558,19 @@ define double @fnmsub_d(double %a, doubl
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fnmsub_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV64IFD-NEXT:    addi a0, a0, %lo(.LCPI17_0)
+; RV64IFD-NEXT:    fld ft1, 0(a0)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmv.d.x ft1, a2
+; RV64IFD-NEXT:    fmv.d.x ft2, a1
+; RV64IFD-NEXT:    fnmsub.d ft0, ft0, ft2, ft1
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %a_ = fadd double 0.0, %a
   %nega = fsub double -0.0, %a_
   %1 = call double @llvm.fma.f64(double %nega, double %b, double %c)

Modified: llvm/trunk/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll Thu Jan 31 19:53:30 2019
@@ -5,6 +5,8 @@
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 ;
 ; This file tests cases where simple floating point operations can be
 ; profitably handled through bit manipulation if a soft-float ABI is being used
@@ -34,6 +36,13 @@ define double @fneg(double %a) nounwind
 ; RV64I-NEXT:    slli a1, a1, 63
 ; RV64I-NEXT:    xor a0, a0, a1
 ; RV64I-NEXT:    ret
+;
+; RV64IFD-LABEL: fneg:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a1, zero, -1
+; RV64IFD-NEXT:    slli a1, a1, 63
+; RV64IFD-NEXT:    xor a0, a0, a1
+; RV64IFD-NEXT:    ret
   %1 = fneg double %a
   ret double %1
 }
@@ -62,6 +71,14 @@ define double @fabs(double %a) nounwind
 ; RV64I-NEXT:    addi a1, a1, -1
 ; RV64I-NEXT:    and a0, a0, a1
 ; RV64I-NEXT:    ret
+;
+; RV64IFD-LABEL: fabs:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a1, zero, -1
+; RV64IFD-NEXT:    slli a1, a1, 63
+; RV64IFD-NEXT:    addi a1, a1, -1
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.fabs.f64(double %a)
   ret double %1
 }
@@ -109,6 +126,17 @@ define double @fcopysign_fneg(double %a,
 ; RV64I-NEXT:    and a0, a0, a2
 ; RV64I-NEXT:    or a0, a0, a1
 ; RV64I-NEXT:    ret
+;
+; RV64IFD-LABEL: fcopysign_fneg:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a2, zero, -1
+; RV64IFD-NEXT:    slli a2, a2, 63
+; RV64IFD-NEXT:    xor a1, a1, a2
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fneg double %b
   %2 = call double @llvm.copysign.f64(double %a, double %1)
   ret double %2

Modified: llvm/trunk/test/CodeGen/RISCV/double-br-fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-br-fcmp.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-br-fcmp.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-br-fcmp.ll Thu Jan 31 19:53:30 2019
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 declare void @abort()
 declare void @exit(i32)
@@ -18,6 +20,19 @@ define void @br_fcmp_false(double %a, do
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB0_2: # %if.else
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_false:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    bnez a0, .LBB0_2
+; RV64IFD-NEXT:  # %bb.1: # %if.then
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB0_2: # %if.else
+; RV64IFD-NEXT:    call abort
   %1 = fcmp false double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -46,6 +61,21 @@ define void @br_fcmp_oeq(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB1_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB1_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB1_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -78,6 +108,22 @@ define void @br_fcmp_oeq_alt(double %a,
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB2_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_oeq_alt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    beqz a0, .LBB2_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB2_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp oeq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.then:
@@ -106,6 +152,21 @@ define void @br_fcmp_ogt(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB3_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ogt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB3_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB3_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ogt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -134,6 +195,21 @@ define void @br_fcmp_oge(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB4_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_oge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB4_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB4_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp oge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -162,6 +238,21 @@ define void @br_fcmp_olt(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB5_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_olt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB5_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB5_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp olt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -190,6 +281,21 @@ define void @br_fcmp_ole(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB6_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ole:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB6_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB6_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ole double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -226,6 +332,28 @@ define void @br_fcmp_one(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB7_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_one:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft1
+; RV64IFD-NEXT:    not a1, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    bnez a0, .LBB7_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB7_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp one double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -258,6 +386,25 @@ define void @br_fcmp_ord(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB8_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ord:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB8_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB8_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ord double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -291,6 +438,26 @@ define void @br_fcmp_ueq(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB9_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ueq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a2, ft1, ft1
+; RV64IFD-NEXT:    and a1, a2, a1
+; RV64IFD-NEXT:    seqz a1, a1
+; RV64IFD-NEXT:    or a0, a0, a1
+; RV64IFD-NEXT:    bnez a0, .LBB9_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB9_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ueq double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -320,6 +487,22 @@ define void @br_fcmp_ugt(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB10_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ugt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB10_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB10_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ugt double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -349,6 +532,22 @@ define void @br_fcmp_uge(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB11_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_uge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB11_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB11_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp uge double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -378,6 +577,22 @@ define void @br_fcmp_ult(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB12_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ult:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB12_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB12_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ult double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -407,6 +622,22 @@ define void @br_fcmp_ule(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB13_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_ule:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB13_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB13_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp ule double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -436,6 +667,22 @@ define void @br_fcmp_une(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB14_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_une:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB14_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB14_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp une double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -468,6 +715,24 @@ define void @br_fcmp_uno(double %a, doub
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB15_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_uno:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    bnez a0, .LBB15_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB15_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp uno double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:
@@ -490,6 +755,19 @@ define void @br_fcmp_true(double %a, dou
 ; RV32IFD-NEXT:    ret
 ; RV32IFD-NEXT:  .LBB16_2: # %if.then
 ; RV32IFD-NEXT:    call abort
+;
+; RV64IFD-LABEL: br_fcmp_true:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    bnez a0, .LBB16_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:  .LBB16_2: # %if.then
+; RV64IFD-NEXT:    call abort
   %1 = fcmp true double %a, %b
   br i1 %1, label %if.then, label %if.else
 if.else:

Modified: llvm/trunk/test/CodeGen/RISCV/double-convert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-convert.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-convert.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-convert.ll Thu Jan 31 19:53:30 2019
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define float @fcvt_s_d(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_s_d:
@@ -13,6 +15,13 @@ define float @fcvt_s_d(double %a) nounwi
 ; RV32IFD-NEXT:    fmv.x.w a0, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_s_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.s.d ft0, ft0
+; RV64IFD-NEXT:    fmv.x.w a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fptrunc double %a to float
   ret float %1
 }
@@ -28,10 +37,19 @@ define double @fcvt_d_s(float %a) nounwi
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_s:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.w.x ft0, a0
+; RV64IFD-NEXT:    fcvt.d.s ft0, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fpext float %a to double
   ret double %1
 }
 
+; For RV64D, fcvt.l.d is semantically equivalent to fcvt.w.d in this case
+; because fptosi will produce poison if the result doesn't fit into an i32.
 define i32 @fcvt_w_d(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_w_d:
 ; RV32IFD:       # %bb.0:
@@ -42,10 +60,18 @@ define i32 @fcvt_w_d(double %a) nounwind
 ; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rtz
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_w_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
   %1 = fptosi double %a to i32
   ret i32 %1
 }
 
+; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case
+; because fptoui will produce poison if the result doesn't fit into an i32.
 define i32 @fcvt_wu_d(double %a) nounwind {
 ; RV32IFD-LABEL: fcvt_wu_d:
 ; RV32IFD:       # %bb.0:
@@ -56,6 +82,12 @@ define i32 @fcvt_wu_d(double %a) nounwin
 ; RV32IFD-NEXT:    fcvt.wu.d a0, ft0, rtz
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_wu_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
   %1 = fptoui double %a to i32
   ret i32 %1
 }
@@ -70,6 +102,12 @@ define double @fcvt_d_w(i32 %a) nounwind
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_w:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.w ft0, a0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = sitofp i32 %a to double
   ret double %1
 }
@@ -84,6 +122,148 @@ define double @fcvt_d_wu(i32 %a) nounwin
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_wu:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.wu ft0, a0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = uitofp i32 %a to double
   ret double %1
 }
+
+define i64 @fcvt_l_d(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_l_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __fixdfdi
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_l_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
+  %1 = fptosi double %a to i64
+  ret i64 %1
+}
+
+define i64 @fcvt_lu_d(double %a) nounwind {
+; RV32IFD-LABEL: fcvt_lu_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __fixunsdfdi
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_lu_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64IFD-NEXT:    ret
+  %1 = fptoui double %a to i64
+  ret i64 %1
+}
+
+define i64 @fmv_x_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fmv_x_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 0(sp)
+; RV32IFD-NEXT:    sw a3, 4(sp)
+; RV32IFD-NEXT:    fld ft0, 0(sp)
+; RV32IFD-NEXT:    sw a0, 0(sp)
+; RV32IFD-NEXT:    sw a1, 4(sp)
+; RV32IFD-NEXT:    fld ft1, 0(sp)
+; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmv_x_d:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
+  %1 = fadd double %a, %b
+  %2 = bitcast double %1 to i64
+  ret i64 %2
+}
+
+define double @fcvt_d_l(i64 %a) nounwind {
+; RV32IFD-LABEL: fcvt_d_l:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __floatdidf
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_l:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.l ft0, a0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
+  %1 = sitofp i64 %a to double
+  ret double %1
+}
+
+define double @fcvt_d_lu(i64 %a) nounwind {
+; RV32IFD-LABEL: fcvt_d_lu:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw ra, 12(sp)
+; RV32IFD-NEXT:    call __floatundidf
+; RV32IFD-NEXT:    lw ra, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcvt_d_lu:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fcvt.d.lu ft0, a0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
+  %1 = uitofp i64 %a to double
+  ret double %1
+}
+
+define double @fmv_d_x(i64 %a, i64 %b) nounwind {
+; Ensure fmv.d.x is generated even for a soft double calling convention
+; RV32IFD-LABEL: fmv_d_x:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -32
+; RV32IFD-NEXT:    sw a3, 20(sp)
+; RV32IFD-NEXT:    sw a2, 16(sp)
+; RV32IFD-NEXT:    sw a1, 28(sp)
+; RV32IFD-NEXT:    sw a0, 24(sp)
+; RV32IFD-NEXT:    fld ft0, 16(sp)
+; RV32IFD-NEXT:    fld ft1, 24(sp)
+; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 32
+; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmv_d_x:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
+  %1 = bitcast i64 %a to double
+  %2 = bitcast i64 %b to double
+  %3 = fadd double %1, %2
+  ret double %3
+}

Modified: llvm/trunk/test/CodeGen/RISCV/double-fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-fcmp.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-fcmp.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-fcmp.ll Thu Jan 31 19:53:30 2019
@@ -1,12 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define i32 @fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_false:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    mv a0, zero
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_false:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    mv a0, zero
+; RV64IFD-NEXT:    ret
   %1 = fcmp false double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -25,6 +32,13 @@ define i32 @fcmp_oeq(double %a, double %
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -43,6 +57,13 @@ define i32 @fcmp_ogt(double %a, double %
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ogt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ogt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -61,6 +82,13 @@ define i32 @fcmp_oge(double %a, double %
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_oge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oge double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -79,6 +107,13 @@ define i32 @fcmp_olt(double %a, double %
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_olt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -97,6 +132,13 @@ define i32 @fcmp_ole(double %a, double %
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ole:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -122,6 +164,20 @@ define i32 @fcmp_one(double %a, double %
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_one:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft1
+; RV64IFD-NEXT:    not a1, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    ret
   %1 = fcmp one double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -144,6 +200,17 @@ define i32 @fcmp_ord(double %a, double %
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ord:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ord double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -167,6 +234,18 @@ define i32 @fcmp_ueq(double %a, double %
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ueq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a2, ft1, ft1
+; RV64IFD-NEXT:    and a1, a2, a1
+; RV64IFD-NEXT:    seqz a1, a1
+; RV64IFD-NEXT:    or a0, a0, a1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ueq double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -186,6 +265,14 @@ define i32 @fcmp_ugt(double %a, double %
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ugt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ugt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -205,6 +292,14 @@ define i32 @fcmp_uge(double %a, double %
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_uge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp uge double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -224,6 +319,14 @@ define i32 @fcmp_ult(double %a, double %
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ult:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ult double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -243,6 +346,14 @@ define i32 @fcmp_ule(double %a, double %
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ule:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ule double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -262,6 +373,14 @@ define i32 @fcmp_une(double %a, double %
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_une:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp une double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -283,6 +402,16 @@ define i32 @fcmp_uno(double %a, double %
 ; RV32IFD-NEXT:    seqz a0, a0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_uno:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    ret
   %1 = fcmp uno double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -293,6 +422,11 @@ define i32 @fcmp_true(double %a, double
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi a0, zero, 1
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_true:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp true double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2

Modified: llvm/trunk/test/CodeGen/RISCV/double-imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-imm.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-imm.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-imm.ll Thu Jan 31 19:53:30 2019
@@ -1,8 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @double_imm() nounwind {
+; TODO: Should probably prefer fld or ld on RV64 rather than materialising an
+; expensive constant.
+;
 ; RV32IFD-LABEL: double_imm:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
@@ -14,6 +19,18 @@ define double @double_imm() nounwind {
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: double_imm:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    lui a0, 512
+; RV64IFD-NEXT:    addiw a0, a0, 1169
+; RV64IFD-NEXT:    slli a0, a0, 15
+; RV64IFD-NEXT:    addi a0, a0, -299
+; RV64IFD-NEXT:    slli a0, a0, 14
+; RV64IFD-NEXT:    addi a0, a0, 1091
+; RV64IFD-NEXT:    slli a0, a0, 12
+; RV64IFD-NEXT:    addi a0, a0, -744
+; RV64IFD-NEXT:    ret
   ret double 3.1415926535897931159979634685441851615905761718750
 }
 
@@ -33,6 +50,16 @@ define double @double_imm_op(double %a)
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: double_imm_op:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    lui a0, %hi(.LCPI1_0)
+; RV64IFD-NEXT:    addi a0, a0, %lo(.LCPI1_0)
+; RV64IFD-NEXT:    fld ft1, 0(a0)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, 1.0
   ret double %1
 }

Modified: llvm/trunk/test/CodeGen/RISCV/double-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-intrinsics.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-intrinsics.ll Thu Jan 31 19:53:30 2019
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 declare double @llvm.sqrt.f64(double)
 
@@ -17,6 +19,13 @@ define double @sqrt_f64(double %a) nounw
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: sqrt_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fsqrt.d ft0, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.sqrt.f64(double %a)
   ret double %1
 }
@@ -32,6 +41,16 @@ define double @powi_f64(double %a, i32 %
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: powi_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    sext.w a1, a1
+; RV64IFD-NEXT:    call __powidf2
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.powi.f64(double %a, i32 %b)
   ret double %1
 }
@@ -47,6 +66,15 @@ define double @sin_f64(double %a) nounwi
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: sin_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call sin
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.sin.f64(double %a)
   ret double %1
 }
@@ -62,6 +90,15 @@ define double @cos_f64(double %a) nounwi
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: cos_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call cos
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.cos.f64(double %a)
   ret double %1
 }
@@ -101,6 +138,27 @@ define double @sincos_f64(double %a) nou
 ; RV32IFD-NEXT:    lw ra, 28(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: sincos_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -32
+; RV64IFD-NEXT:    sd ra, 24(sp)
+; RV64IFD-NEXT:    sd s1, 16(sp)
+; RV64IFD-NEXT:    sd s2, 8(sp)
+; RV64IFD-NEXT:    mv s1, a0
+; RV64IFD-NEXT:    call sin
+; RV64IFD-NEXT:    mv s2, a0
+; RV64IFD-NEXT:    mv a0, s1
+; RV64IFD-NEXT:    call cos
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, s2
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ld s2, 8(sp)
+; RV64IFD-NEXT:    ld s1, 16(sp)
+; RV64IFD-NEXT:    ld ra, 24(sp)
+; RV64IFD-NEXT:    addi sp, sp, 32
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.sin.f64(double %a)
   %2 = call double @llvm.cos.f64(double %a)
   %3 = fadd double %1, %2
@@ -118,6 +176,15 @@ define double @pow_f64(double %a, double
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: pow_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call pow
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.pow.f64(double %a, double %b)
   ret double %1
 }
@@ -133,6 +200,15 @@ define double @exp_f64(double %a) nounwi
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: exp_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call exp
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.exp.f64(double %a)
   ret double %1
 }
@@ -148,6 +224,15 @@ define double @exp2_f64(double %a) nounw
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: exp2_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call exp2
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.exp2.f64(double %a)
   ret double %1
 }
@@ -163,6 +248,15 @@ define double @log_f64(double %a) nounwi
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: log_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call log
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.log.f64(double %a)
   ret double %1
 }
@@ -178,6 +272,15 @@ define double @log10_f64(double %a) noun
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: log10_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call log10
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.log10.f64(double %a)
   ret double %1
 }
@@ -193,6 +296,15 @@ define double @log2_f64(double %a) nounw
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: log2_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call log2
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.log2.f64(double %a)
   ret double %1
 }
@@ -218,6 +330,15 @@ define double @fma_f64(double %a, double
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fma_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
   ret double %1
 }
@@ -245,6 +366,16 @@ define double @fmuladd_f64(double %a, do
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fmuladd_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmul.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.d.x ft1, a2
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
   ret double %1
 }
@@ -258,6 +389,14 @@ define double @fabs_f64(double %a) nounw
 ; RV32IFD-NEXT:    addi a2, a2, -1
 ; RV32IFD-NEXT:    and a1, a1, a2
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fabs_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a1, zero, -1
+; RV64IFD-NEXT:    slli a1, a1, 63
+; RV64IFD-NEXT:    addi a1, a1, -1
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.fabs.f64(double %a)
   ret double %1
 }
@@ -280,6 +419,14 @@ define double @minnum_f64(double %a, dou
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: minnum_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.minnum.f64(double %a, double %b)
   ret double %1
 }
@@ -302,6 +449,14 @@ define double @maxnum_f64(double %a, dou
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: maxnum_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmax.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.maxnum.f64(double %a, double %b)
   ret double %1
 }
@@ -341,6 +496,14 @@ define double @copysign_f64(double %a, d
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: copysign_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.copysign.f64(double %a, double %b)
   ret double %1
 }
@@ -356,6 +519,15 @@ define double @floor_f64(double %a) noun
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: floor_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call floor
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.floor.f64(double %a)
   ret double %1
 }
@@ -371,6 +543,15 @@ define double @ceil_f64(double %a) nounw
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: ceil_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call ceil
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.ceil.f64(double %a)
   ret double %1
 }
@@ -386,6 +567,15 @@ define double @trunc_f64(double %a) noun
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: trunc_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call trunc
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.trunc.f64(double %a)
   ret double %1
 }
@@ -401,6 +591,15 @@ define double @rint_f64(double %a) nounw
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: rint_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call rint
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.rint.f64(double %a)
   ret double %1
 }
@@ -416,6 +615,15 @@ define double @nearbyint_f64(double %a)
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: nearbyint_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call nearbyint
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.nearbyint.f64(double %a)
   ret double %1
 }
@@ -431,6 +639,15 @@ define double @round_f64(double %a) noun
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: round_f64:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    call round
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = call double @llvm.round.f64(double %a)
   ret double %1
 }
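
A note on the RV64IFD checks above: under the soft float ABI an f64 travels in a 64-bit GPR, so every in-register FP operation is bracketed by fmv.d.x/fmv.x.d moves. The recurring shape, annotated (a sketch only; register assignments vary from test to test):

    fmv.d.x ft0, a1        # reinterpret the i64 bits of the second f64 arg as a double
    fmv.d.x ft1, a0        # likewise for the first argument
    fmin.d  ft0, ft1, ft0  # the actual FP operation (fmin.d, fmax.d, fmadd.d, ...)
    fmv.x.d a0, ft0        # move the result bits back into the integer return register
    ret

The intrinsics without native instructions (exp2, log, floor, ceil, ...) instead save ra and call the libm routine, and fabs_f64 avoids FPRs entirely: the addi/slli/addi sequence builds the mask 0x7fffffffffffffff and a single and clears the sign bit.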

Modified: llvm/trunk/test/CodeGen/RISCV/double-mem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-mem.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-mem.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-mem.ll Thu Jan 31 19:53:30 2019
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @fld(double *%a) nounwind {
 ; RV32IFD-LABEL: fld:
@@ -14,6 +16,14 @@ define double @fld(double *%a) nounwind
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fld:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fld ft0, 24(a0)
+; RV64IFD-NEXT:    fld ft1, 0(a0)
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = load double, double* %a
   %2 = getelementptr double, double* %a, i32 3
   %3 = load double, double* %2
@@ -38,6 +48,15 @@ define void @fsd(double *%a, double %b,
 ; RV32IFD-NEXT:    fsd ft0, 0(a0)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsd:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fsd ft0, 64(a0)
+; RV64IFD-NEXT:    fsd ft0, 0(a0)
+; RV64IFD-NEXT:    ret
 ; Use %b and %c in an FP op to ensure floating point registers are used, even
 ; for the soft float ABI
   %1 = fadd double %b, %c
@@ -72,6 +91,20 @@ define double @fld_fsd_global(double %a,
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fld_fsd_global:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    lui a0, %hi(G)
+; RV64IFD-NEXT:    fld ft1, %lo(G)(a0)
+; RV64IFD-NEXT:    fsd ft0, %lo(G)(a0)
+; RV64IFD-NEXT:    addi a0, a0, %lo(G)
+; RV64IFD-NEXT:    fld ft1, 72(a0)
+; RV64IFD-NEXT:    fsd ft0, 72(a0)
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
 ; Use %a and %b in an FP op to ensure floating point registers are used, even
 ; for the soft float ABI
   %1 = fadd double %a, %b
@@ -100,6 +133,18 @@ define double @fld_fsd_constant(double %
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fld_fsd_constant:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    lui a0, 56
+; RV64IFD-NEXT:    addiw a0, a0, -1353
+; RV64IFD-NEXT:    slli a0, a0, 14
+; RV64IFD-NEXT:    fld ft1, -273(a0)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:    fsd ft0, -273(a0)
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = inttoptr i32 3735928559 to double*
   %2 = load volatile double, double* %1
   %3 = fadd double %a, %2
@@ -133,6 +178,23 @@ define double @fld_stack(double %a) noun
 ; RV32IFD-NEXT:    lw ra, 28(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fld_stack:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -32
+; RV64IFD-NEXT:    sd ra, 24(sp)
+; RV64IFD-NEXT:    sd s1, 16(sp)
+; RV64IFD-NEXT:    mv s1, a0
+; RV64IFD-NEXT:    addi a0, sp, 8
+; RV64IFD-NEXT:    call notdead
+; RV64IFD-NEXT:    fmv.d.x ft0, s1
+; RV64IFD-NEXT:    fld ft1, 8(sp)
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ld s1, 16(sp)
+; RV64IFD-NEXT:    ld ra, 24(sp)
+; RV64IFD-NEXT:    addi sp, sp, 32
+; RV64IFD-NEXT:    ret
   %1 = alloca double, align 8
   %2 = bitcast double* %1 to i8*
   call void @notdead(i8* %2)
@@ -159,6 +221,20 @@ define void @fsd_stack(double %a, double
 ; RV32IFD-NEXT:    lw ra, 28(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsd_stack:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT:    fsd ft0, 0(sp)
+; RV64IFD-NEXT:    mv a0, sp
+; RV64IFD-NEXT:    call notdead
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
   %1 = fadd double %a, %b ; force store from FPR64
   %2 = alloca double, align 8
   store double %1, double* %2
@@ -179,6 +255,13 @@ define void @fsd_trunc(float* %a, double
 ; RV32IFD-NEXT:    fsw ft0, 0(a0)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fsd_trunc:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fcvt.s.d ft0, ft0
+; RV64IFD-NEXT:    fsw ft0, 0(a0)
+; RV64IFD-NEXT:    ret
   %1 = fptrunc double %b to float
   store float %1, float* %a, align 4
   ret void
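
On the fld_fsd_constant checks: the address 3735928559 (0xdeadbeef) does not fit in a single RV64 immediate, so it is materialized with lui/addiw/slli and the remainder is folded into the memory-access offset. The arithmetic, spelled out against the instructions shown above:

    lui   a0, 56           # a0 = 56 << 12 = 229376
    addiw a0, a0, -1353    # a0 = 229376 - 1353 = 228023
    slli  a0, a0, 14       # a0 = 228023 << 14 = 3735928832
    fld   ft1, -273(a0)    # 3735928832 - 273 = 3735928559 = 0xdeadbeef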

Modified: llvm/trunk/test/CodeGen/RISCV/double-select-fcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-select-fcmp.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-select-fcmp.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-select-fcmp.ll Thu Jan 31 19:53:30 2019
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @select_fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: select_fcmp_false:
@@ -8,6 +10,11 @@ define double @select_fcmp_false(double
 ; RV32IFD-NEXT:    mv a1, a3
 ; RV32IFD-NEXT:    mv a0, a2
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_false:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    mv a0, a1
+; RV64IFD-NEXT:    ret
   %1 = fcmp false double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -33,6 +40,18 @@ define double @select_fcmp_oeq(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft1
+; RV64IFD-NEXT:    bnez a0, .LBB1_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB1_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -58,6 +77,18 @@ define double @select_fcmp_ogt(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ogt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB2_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB2_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ogt double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -83,6 +114,18 @@ define double @select_fcmp_oge(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_oge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB3_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB3_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oge double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -108,6 +151,18 @@ define double @select_fcmp_olt(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_olt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    flt.d a0, ft0, ft1
+; RV64IFD-NEXT:    bnez a0, .LBB4_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB4_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -133,6 +188,18 @@ define double @select_fcmp_ole(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ole:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fle.d a0, ft0, ft1
+; RV64IFD-NEXT:    bnez a0, .LBB5_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB5_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -166,6 +233,25 @@ define double @select_fcmp_one(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_one:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft1
+; RV64IFD-NEXT:    not a1, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    bnez a0, .LBB6_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB6_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp one double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -195,6 +281,22 @@ define double @select_fcmp_ord(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ord:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB7_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB7_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ord double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -225,6 +327,23 @@ define double @select_fcmp_ueq(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ueq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft1
+; RV64IFD-NEXT:    or a0, a1, a0
+; RV64IFD-NEXT:    bnez a0, .LBB8_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB8_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ueq double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -251,6 +370,19 @@ define double @select_fcmp_ugt(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ugt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fle.d a0, ft0, ft1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB9_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB9_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ugt double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -277,6 +409,19 @@ define double @select_fcmp_uge(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_uge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    flt.d a0, ft0, ft1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB10_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB10_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp uge double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -303,6 +448,19 @@ define double @select_fcmp_ult(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ult:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB11_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB11_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ult double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -329,6 +487,19 @@ define double @select_fcmp_ule(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_ule:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB12_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB12_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ule double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -355,6 +526,19 @@ define double @select_fcmp_une(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_une:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft1
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    bnez a0, .LBB13_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB13_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp une double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -384,6 +568,21 @@ define double @select_fcmp_uno(double %a
 ; RV32IFD-NEXT:    lw a1, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_uno:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    bnez a0, .LBB14_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    fmv.d ft0, ft1
+; RV64IFD-NEXT:  .LBB14_2:
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp uno double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -393,6 +592,10 @@ define double @select_fcmp_true(double %
 ; RV32IFD-LABEL: select_fcmp_true:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: select_fcmp_true:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    ret
   %1 = fcmp true double %a, %b
   %2 = select i1 %1, double %a, double %b
   ret double %2
@@ -417,6 +620,18 @@ define i32 @i32_select_fcmp_oeq(double %
 ; RV32IFD-NEXT:    mv a0, a4
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: i32_select_fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    bnez a0, .LBB16_2
+; RV64IFD-NEXT:  # %bb.1:
+; RV64IFD-NEXT:    mv a2, a3
+; RV64IFD-NEXT:  .LBB16_2:
+; RV64IFD-NEXT:    mv a0, a2
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = select i1 %1, i32 %c, i32 %d
   ret i32 %2
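
One pattern in these checks worth spelling out: fcmp conditions with no single RISC-V comparison instruction (one, ord, ueq, uno) are assembled from feq.d building blocks, relying on feq.d x, x being 1 exactly when x is not NaN. An annotated reading of the select_fcmp_one sequence above (the comments are interpretation, not part of the output):

    feq.d a0, ft1, ft1     # 1 iff b is not NaN
    feq.d a1, ft0, ft0     # 1 iff a is not NaN
    and   a0, a1, a0       # 1 iff the pair is ordered
    feq.d a1, ft0, ft1     # 1 iff a == b
    not   a1, a1           # bit 0 is now (a != b)
    seqz  a0, a0           # 1 iff unordered
    xori  a0, a0, 1        # flip back: 1 iff ordered
    and   a0, a1, a0       # one(a, b) = ordered && (a != b)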

Modified: llvm/trunk/test/CodeGen/RISCV/double-stack-spill-restore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-stack-spill-restore.ll?rev=352833&r1=352832&r2=352833&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-stack-spill-restore.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-stack-spill-restore.ll Thu Jan 31 19:53:30 2019
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define double @func(double %d, i32 %n) nounwind {
 ; RV32IFD-LABEL: func:
@@ -30,6 +32,28 @@ define double @func(double %d, i32 %n) n
 ; RV32IFD-NEXT:    lw ra, 28(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: func:
+; RV64IFD:       # %bb.0: # %entry
+; RV64IFD-NEXT:    addi sp, sp, -16
+; RV64IFD-NEXT:    sd ra, 8(sp)
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    slli a0, a1, 32
+; RV64IFD-NEXT:    srli a0, a0, 32
+; RV64IFD-NEXT:    beqz a0, .LBB0_2
+; RV64IFD-NEXT:  # %bb.1: # %if.else
+; RV64IFD-NEXT:    addi a1, a1, -1
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    fsd ft0, 0(sp)
+; RV64IFD-NEXT:    call func
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fld ft1, 0(sp)
+; RV64IFD-NEXT:    fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT:  .LBB0_2: # %return
+; RV64IFD-NEXT:    fmv.x.d a0, ft0
+; RV64IFD-NEXT:    ld ra, 8(sp)
+; RV64IFD-NEXT:    addi sp, sp, 16
+; RV64IFD-NEXT:    ret
 entry:
   %cmp = icmp eq i32 %n, 0
   br i1 %cmp, label %return, label %if.else
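
The RV64IFD output for func shows the full round trip for an f64 that is live across a call under the soft float ABI: the incoming bits are moved into ft0, spilled with fsd before the recursive call (a0 is needed for the outgoing argument), then reloaded and added to the returned value. The core of the sequence, annotated as a sketch:

    fmv.x.d a0, ft0        # outgoing f64 argument is passed in a GPR
    fsd     ft0, 0(sp)     # spill the value that must survive the call
    call    func
    fmv.d.x ft0, a0        # returned f64 arrives in a0
    fld     ft1, 0(sp)     # reload the spilled value
    fadd.d  ft0, ft0, ft1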

Added: llvm/trunk/test/CodeGen/RISCV/rv64d-double-convert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/rv64d-double-convert.ll?rev=352833&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/rv64d-double-convert.ll (added)
+++ llvm/trunk/test/CodeGen/RISCV/rv64d-double-convert.ll Thu Jan 31 19:53:30 2019
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64ID
+
+; This file exhaustively checks double<->i32 conversions. In general,
+; fcvt.l[u].d can be selected instead of fcvt.w[u].d because poison is
+; generated for an fpto[s|u]i conversion if the result doesn't fit in the
+; target type.
+
+define i32 @aext_fptosi(double %a) nounwind {
+; RV64ID-LABEL: aext_fptosi:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fmv.d.x ft0, a0
+; RV64ID-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64ID-NEXT:    ret
+  %1 = fptosi double %a to i32
+  ret i32 %1
+}
+
+define signext i32 @sext_fptosi(double %a) nounwind {
+; RV64ID-LABEL: sext_fptosi:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fmv.d.x ft0, a0
+; RV64ID-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64ID-NEXT:    ret
+  %1 = fptosi double %a to i32
+  ret i32 %1
+}
+
+define zeroext i32 @zext_fptosi(double %a) nounwind {
+; RV64ID-LABEL: zext_fptosi:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fmv.d.x ft0, a0
+; RV64ID-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64ID-NEXT:    slli a0, a0, 32
+; RV64ID-NEXT:    srli a0, a0, 32
+; RV64ID-NEXT:    ret
+  %1 = fptosi double %a to i32
+  ret i32 %1
+}
+
+define i32 @aext_fptoui(double %a) nounwind {
+; RV64ID-LABEL: aext_fptoui:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fmv.d.x ft0, a0
+; RV64ID-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64ID-NEXT:    ret
+  %1 = fptoui double %a to i32
+  ret i32 %1
+}
+
+define signext i32 @sext_fptoui(double %a) nounwind {
+; RV64ID-LABEL: sext_fptoui:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fmv.d.x ft0, a0
+; RV64ID-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV64ID-NEXT:    ret
+  %1 = fptoui double %a to i32
+  ret i32 %1
+}
+
+define zeroext i32 @zext_fptoui(double %a) nounwind {
+; RV64ID-LABEL: zext_fptoui:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fmv.d.x ft0, a0
+; RV64ID-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64ID-NEXT:    ret
+  %1 = fptoui double %a to i32
+  ret i32 %1
+}
+
+define double @uitofp_aext_i32_to_f64(i32 %a) nounwind {
+; RV64ID-LABEL: uitofp_aext_i32_to_f64:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fcvt.d.wu ft0, a0
+; RV64ID-NEXT:    fmv.x.d a0, ft0
+; RV64ID-NEXT:    ret
+  %1 = uitofp i32 %a to double
+  ret double %1
+}
+
+define double @uitofp_sext_i32_to_f64(i32 signext %a) nounwind {
+; RV64ID-LABEL: uitofp_sext_i32_to_f64:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fcvt.d.wu ft0, a0
+; RV64ID-NEXT:    fmv.x.d a0, ft0
+; RV64ID-NEXT:    ret
+  %1 = uitofp i32 %a to double
+  ret double %1
+}
+
+define double @uitofp_zext_i32_to_f64(i32 zeroext %a) nounwind {
+; RV64ID-LABEL: uitofp_zext_i32_to_f64:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fcvt.d.wu ft0, a0
+; RV64ID-NEXT:    fmv.x.d a0, ft0
+; RV64ID-NEXT:    ret
+  %1 = uitofp i32 %a to double
+  ret double %1
+}
+
+define double @sitofp_aext_i32_to_f64(i32 %a) nounwind {
+; RV64ID-LABEL: sitofp_aext_i32_to_f64:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fcvt.d.w ft0, a0
+; RV64ID-NEXT:    fmv.x.d a0, ft0
+; RV64ID-NEXT:    ret
+  %1 = sitofp i32 %a to double
+  ret double %1
+}
+
+define double @sitofp_sext_i32_to_f64(i32 signext %a) nounwind {
+; RV64ID-LABEL: sitofp_sext_i32_to_f64:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fcvt.d.l ft0, a0
+; RV64ID-NEXT:    fmv.x.d a0, ft0
+; RV64ID-NEXT:    ret
+  %1 = sitofp i32 %a to double
+  ret double %1
+}
+
+define double @sitofp_zext_i32_to_f64(i32 zeroext %a) nounwind {
+; RV64ID-LABEL: sitofp_zext_i32_to_f64:
+; RV64ID:       # %bb.0:
+; RV64ID-NEXT:    fcvt.d.w ft0, a0
+; RV64ID-NEXT:    fmv.x.d a0, ft0
+; RV64ID-NEXT:    ret
+  %1 = sitofp i32 %a to double
+  ret double %1
+}
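
Two details in rv64d-double-convert.ll deserve a note. As the file comment says, fptosi/fptoui to i32 may freely use the 64-bit fcvt.l[u].d forms because out-of-range inputs produce poison anyway. The one place a 32-bit form survives is sext_fptoui: a signext i32 result must be sign-extended in a0, and (on the usual RV64 convention that the *.w[u] conversions sign-extend their 32-bit result) fcvt.wu.d delivers that directly, where fcvt.lu.d would not. Compare:

    fcvt.wu.d a0, ft0, rtz   # sext_fptoui: 32-bit result, sign-extended on RV64
    fcvt.lu.d a0, ft0, rtz   # aext/zext variants: plain 64-bit unsigned convert

zext_fptosi takes the complementary route, pairing fcvt.l.d with a slli/srli shift pair to zero-extend the low 32 bits.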



