[llvm] r329874 - [RISCV] Add codegen support for RV32D floating point arithmetic operations

Alex Bradbury via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 11 22:42:42 PDT 2018


Author: asb
Date: Wed Apr 11 22:42:42 2018
New Revision: 329874

URL: http://llvm.org/viewvc/llvm-project?rev=329874&view=rev
Log:
[RISCV] Add codegen support for RV32D floating point arithmetic operations

Modified:
    llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td
    llvm/trunk/test/CodeGen/RISCV/double-arith.ll

Modified: llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp?rev=329874&r1=329873&r2=329874&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVISelLowering.cpp Wed Apr 11 22:42:42 2018
@@ -121,8 +121,11 @@ RISCVTargetLowering::RISCVTargetLowering
     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
   }
 
-  if (Subtarget.hasStdExtD())
+  if (Subtarget.hasStdExtD()) {
+    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
+    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+  }
 
   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
   setOperationAction(ISD::BlockAddress, XLenVT, Custom);

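For reference (not part of the patch itself): marking ISD::FMINNUM and ISD::FMAXNUM as Legal for f64 keeps llvm.minnum/llvm.maxnum calls as single DAG nodes so they can be matched by the FMIN_D/FMAX_D patterns added below, instead of being expanded (typically into a libcall). A minimal illustration, with a made-up function name:

  define double @clamp_example(double %x, double %lo, double %hi) nounwind {
    ; With the D extension, both calls should select to a single fmax.d/fmin.d.
    %1 = call double @llvm.maxnum.f64(double %x, double %lo)
    %2 = call double @llvm.minnum.f64(double %1, double %hi)
    ret double %2
  }

  declare double @llvm.maxnum.f64(double, double)
  declare double @llvm.minnum.f64(double, double)

Such IR can be exercised with e.g. llc -mtriple=riscv32 -mattr=+d, as in the test updated below.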
Modified: llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td?rev=329874&r1=329873&r2=329874&view=diff
==============================================================================
--- llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td (original)
+++ llvm/trunk/lib/Target/RISCV/RISCVInstrInfoD.td Wed Apr 11 22:42:42 2018
@@ -191,6 +191,9 @@ def : InstAlias<"fneg.d $rd, $rs", (FSGN
 // Pseudo-instructions and codegen patterns
 //===----------------------------------------------------------------------===//
 
+class PatFpr64Fpr64<SDPatternOperator OpNode, RVInstR Inst>
+    : Pat<(OpNode FPR64:$rs1, FPR64:$rs2), (Inst $rs1, $rs2)>;
+
 class PatFpr64Fpr64DynFrm<SDPatternOperator OpNode, RVInstRFrm Inst>
     : Pat<(OpNode FPR64:$rs1, FPR64:$rs2), (Inst $rs1, $rs2, 0b111)>;
 
@@ -199,6 +202,32 @@ let Predicates = [HasStdExtD] in {
 /// Float arithmetic operations
 
 def : PatFpr64Fpr64DynFrm<fadd, FADD_D>;
+def : PatFpr64Fpr64DynFrm<fsub, FSUB_D>;
+def : PatFpr64Fpr64DynFrm<fmul, FMUL_D>;
+def : PatFpr64Fpr64DynFrm<fdiv, FDIV_D>;
+
+def : Pat<(fsqrt FPR64:$rs1), (FSQRT_D FPR64:$rs1, 0b111)>;
+
+def : Pat<(fneg FPR64:$rs1), (FSGNJN_D $rs1, $rs1)>;
+def : Pat<(fabs FPR64:$rs1), (FSGNJX_D $rs1, $rs1)>;
+
+def : PatFpr64Fpr64<fcopysign, FSGNJ_D>;
+def : Pat<(fcopysign FPR64:$rs1, (fneg FPR64:$rs2)), (FSGNJN_D $rs1, $rs2)>;
+
+// The RISC-V 2.2 user-level ISA spec defines fmin and fmax as returning the
+// canonical NaN when given a signaling NaN. This doesn't match the LLVM
+// behaviour (see https://bugs.llvm.org/show_bug.cgi?id=27363). However, the
+// draft 2.3 ISA spec changes the definition of fmin and fmax in a way that
+// matches LLVM's fminnum and fmaxnum
+// <https://github.com/riscv/riscv-isa-manual/commit/cd20cee7efd9bac7c5aa127ec3b451749d2b3cce>.
+def : PatFpr64Fpr64<fminnum, FMIN_D>;
+def : PatFpr64Fpr64<fmaxnum, FMAX_D>;
+
+/// Setcc
+
+def : PatFpr64Fpr64<setoeq, FEQ_D>;
+def : PatFpr64Fpr64<setolt, FLT_D>;
+def : PatFpr64Fpr64<setole, FLE_D>;
 
 /// Loads
 

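To make the NaN caveat in the comment above concrete: the divergence only shows up when an operand is a signaling NaN. An illustrative sketch (not from this commit; 0x7FF0000000000001 is a signaling-NaN bit pattern for f64):

  define double @min_with_snan(double %a) nounwind {
    ; Under LLVM's fminnum semantics (and the draft 2.3 fmin.d) this returns %a
    ; for any non-NaN %a; a strict RISC-V 2.2 fmin.d would instead produce the
    ; canonical NaN.
    %1 = call double @llvm.minnum.f64(double %a, double 0x7FF0000000000001)
    ret double %1
  }

  declare double @llvm.minnum.f64(double, double)

Since the draft 2.3 behaviour is the one that matches fminnum/fmaxnum, selecting those nodes to FMIN_D/FMAX_D is sound under that reading of the spec.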
Modified: llvm/trunk/test/CodeGen/RISCV/double-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/RISCV/double-arith.ll?rev=329874&r1=329873&r2=329874&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/RISCV/double-arith.ll (original)
+++ llvm/trunk/test/CodeGen/RISCV/double-arith.ll Wed Apr 11 22:42:42 2018
@@ -21,3 +21,259 @@ define double @fadd_d(double %a, double
   %1 = fadd double %a, %b
   ret double %1
 }
+
+define double @fsub_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fsub_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fsub.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = fsub double %a, %b
+  ret double %1
+}
+
+define double @fmul_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fmul_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fmul.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = fmul double %a, %b
+  ret double %1
+}
+
+define double @fdiv_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fdiv_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fdiv.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = fdiv double %a, %b
+  ret double %1
+}
+
+declare double @llvm.sqrt.f64(double)
+
+define double @fsqrt_d(double %a) nounwind {
+; RV32IFD-LABEL: fsqrt_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fsqrt.d ft0, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = call double @llvm.sqrt.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.copysign.f64(double, double)
+
+define double @fsgnj_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fsgnj_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fsgnj.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = call double @llvm.copysign.f64(double %a, double %b)
+  ret double %1
+}
+
+define double @fneg_d(double %a) nounwind {
+; RV32IFD-LABEL: fneg_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fneg.d ft0, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = fsub double -0.0, %a
+  ret double %1
+}
+
+define double @fsgnjn_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fsgnjn_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fsgnjn.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = fsub double -0.0, %b
+  %2 = call double @llvm.copysign.f64(double %a, double %1)
+  ret double %2
+}
+
+declare double @llvm.fabs.f64(double)
+
+define double @fabs_d(double %a) nounwind {
+; RV32IFD-LABEL: fabs_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fabs.d ft0, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = call double @llvm.fabs.f64(double %a)
+  ret double %1
+}
+
+declare double @llvm.minnum.f64(double, double)
+
+define double @fmin_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fmin_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = call double @llvm.minnum.f64(double %a, double %b)
+  ret double %1
+}
+
+declare double @llvm.maxnum.f64(double, double)
+
+define double @fmax_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fmax_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fmax.d ft0, ft1, ft0
+; RV32IFD-NEXT:    fsd ft0, 8(sp)
+; RV32IFD-NEXT:    lw a0, 8(sp)
+; RV32IFD-NEXT:    lw a1, 12(sp)
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = call double @llvm.maxnum.f64(double %a, double %b)
+  ret double %1
+}
+
+define i32 @feq_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: feq_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    feq.d a0, ft1, ft0
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = fcmp oeq double %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @flt_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: flt_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    flt.d a0, ft1, ft0
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = fcmp olt double %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @fle_d(double %a, double %b) nounwind {
+; RV32IFD-LABEL: fle_d:
+; RV32IFD:       # %bb.0:
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a2, 8(sp)
+; RV32IFD-NEXT:    sw a3, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft1, 8(sp)
+; RV32IFD-NEXT:    fle.d a0, ft1, ft0
+; RV32IFD-NEXT:    addi sp, sp, 16
+; RV32IFD-NEXT:    ret
+  %1 = fcmp ole double %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}



