[llvm] 10129fe - [RISCV] Fix type in f16 and f64 version of lrint/llrint/lround/llround test cases. NFC

Craig Topper via llvm-commits llvm-commits@lists.llvm.org
Mon Dec 27 15:07:08 PST 2021


Author: Craig Topper
Date: 2021-12-27T14:59:23-08:00
New Revision: 10129fe86102a1178a0f4f0dcdf861c568445900

URL: https://github.com/llvm/llvm-project/commit/10129fe86102a1178a0f4f0dcdf861c568445900
DIFF: https://github.com/llvm/llvm-project/commit/10129fe86102a1178a0f4f0dcdf861c568445900.diff

LOG: [RISCV] Fix type in f16 and f64 version of lrint/llrint/lround/llround test cases. NFC

Due to a copy/paste mistake we were always testing float.

This required splitting up the f16 tests into separate files since
we don't have an appropriate libcall to use when the types involved
aren't legal.

Added: 
    llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll
    llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll

Modified: 
    llvm/test/CodeGen/RISCV/double-intrinsics.ll
    llvm/test/CodeGen/RISCV/half-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 553e8fbeec37..5b1be180c1b8 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -1146,26 +1146,30 @@ define double @roundeven_f64(double %a) nounwind {
   ret double %1
 }
 
-declare iXLen @llvm.lrint.iXLen.f64(float)
+declare iXLen @llvm.lrint.iXLen.f64(double)
 
-define iXLen @lrint_f64(float %a) nounwind {
+define iXLen @lrint_f64(double %a) nounwind {
 ; RV32IFD-LABEL: lrint_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fmv.w.x ft0, a0
-; RV32IFD-NEXT:    fcvt.w.s a0, ft0
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fcvt.w.d a0, ft0
+; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: lrint_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.w.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.s a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: lrint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lrintf@plt
+; RV32I-NEXT:    call lrint@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1174,34 +1178,38 @@ define iXLen @lrint_f64(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lrintf@plt
+; RV64I-NEXT:    call lrint@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call iXLen @llvm.lrint.iXLen.f64(float %a)
+  %1 = call iXLen @llvm.lrint.iXLen.f64(double %a)
   ret iXLen %1
 }
 
-declare iXLen @llvm.lround.iXLen.f64(float)
+declare iXLen @llvm.lround.iXLen.f64(double)
 
-define iXLen @lround_f64(float %a) nounwind {
+define iXLen @lround_f64(double %a) nounwind {
 ; RV32IFD-LABEL: lround_f64:
 ; RV32IFD:       # %bb.0:
-; RV32IFD-NEXT:    fmv.w.x ft0, a0
-; RV32IFD-NEXT:    fcvt.w.s a0, ft0, rmm
+; RV32IFD-NEXT:    addi sp, sp, -16
+; RV32IFD-NEXT:    sw a0, 8(sp)
+; RV32IFD-NEXT:    sw a1, 12(sp)
+; RV32IFD-NEXT:    fld ft0, 8(sp)
+; RV32IFD-NEXT:    fcvt.w.d a0, ft0, rmm
+; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: lround_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.w.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rmm
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: lround_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lroundf@plt
+; RV32I-NEXT:    call lround@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1210,37 +1218,37 @@ define iXLen @lround_f64(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lroundf@plt
+; RV64I-NEXT:    call lround@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call iXLen @llvm.lround.iXLen.f64(float %a)
+  %1 = call iXLen @llvm.lround.iXLen.f64(double %a)
   ret iXLen %1
 }
 
-declare i64 @llvm.llrint.i64.f64(float)
+declare i64 @llvm.llrint.i64.f64(double)
 
-define i64 @llrint_f64(float %a) nounwind {
+define i64 @llrint_f64(double %a) nounwind {
 ; RV32IFD-LABEL: llrint_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call llrintf@plt
+; RV32IFD-NEXT:    call llrint@plt
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: llrint_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.w.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.s a0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: llrint_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llrintf@plt
+; RV32I-NEXT:    call llrint@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1249,37 +1257,37 @@ define i64 @llrint_f64(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llrintf@plt
+; RV64I-NEXT:    call llrint@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i64 @llvm.llrint.i64.f64(float %a)
+  %1 = call i64 @llvm.llrint.i64.f64(double %a)
   ret i64 %1
 }
 
-declare i64 @llvm.llround.i64.f64(float)
+declare i64 @llvm.llround.i64.f64(double)
 
-define i64 @llround_f64(float %a) nounwind {
+define i64 @llround_f64(double %a) nounwind {
 ; RV32IFD-LABEL: llround_f64:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi sp, sp, -16
 ; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IFD-NEXT:    call llroundf@plt
+; RV32IFD-NEXT:    call llround@plt
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: llround_f64:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    fmv.w.x ft0, a0
-; RV64IFD-NEXT:    fcvt.l.s a0, ft0, rmm
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fcvt.l.d a0, ft0, rmm
 ; RV64IFD-NEXT:    ret
 ;
 ; RV32I-LABEL: llround_f64:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llroundf@plt
+; RV32I-NEXT:    call llround@plt
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -1288,10 +1296,10 @@ define i64 @llround_f64(float %a) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llroundf@plt
+; RV64I-NEXT:    call llround@plt
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i64 @llvm.llround.i64.f64(float %a)
+  %1 = call i64 @llvm.llround.i64.f64(double %a)
   ret i64 %1
 }

diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 128c61947cb0..88d05e6ad9eb 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -1967,195 +1967,3 @@ define half @roundeven_f16(half %a) nounwind {
   %1 = call half @llvm.roundeven.f16(half %a)
   ret half %1
 }
-
-declare iXLen @llvm.lrint.iXLen.f16(float)
-
-define iXLen @lrint_f16(float %a) nounwind {
-; RV32IZFH-LABEL: lrint_f16:
-; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    fcvt.w.s a0, fa0
-; RV32IZFH-NEXT:    ret
-;
-; RV64IZFH-LABEL: lrint_f16:
-; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fcvt.l.s a0, fa0
-; RV64IZFH-NEXT:    ret
-;
-; RV32IDZFH-LABEL: lrint_f16:
-; RV32IDZFH:       # %bb.0:
-; RV32IDZFH-NEXT:    fcvt.w.s a0, fa0
-; RV32IDZFH-NEXT:    ret
-;
-; RV64IDZFH-LABEL: lrint_f16:
-; RV64IDZFH:       # %bb.0:
-; RV64IDZFH-NEXT:    fcvt.l.s a0, fa0
-; RV64IDZFH-NEXT:    ret
-;
-; RV32I-LABEL: lrint_f16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lrintf@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: lrint_f16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lrintf@plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-  %1 = call iXLen @llvm.lrint.iXLen.f16(float %a)
-  ret iXLen %1
-}
-
-declare iXLen @llvm.lround.iXLen.f16(float)
-
-define iXLen @lround_f16(float %a) nounwind {
-; RV32IZFH-LABEL: lround_f16:
-; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    fcvt.w.s a0, fa0, rmm
-; RV32IZFH-NEXT:    ret
-;
-; RV64IZFH-LABEL: lround_f16:
-; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fcvt.l.s a0, fa0, rmm
-; RV64IZFH-NEXT:    ret
-;
-; RV32IDZFH-LABEL: lround_f16:
-; RV32IDZFH:       # %bb.0:
-; RV32IDZFH-NEXT:    fcvt.w.s a0, fa0, rmm
-; RV32IDZFH-NEXT:    ret
-;
-; RV64IDZFH-LABEL: lround_f16:
-; RV64IDZFH:       # %bb.0:
-; RV64IDZFH-NEXT:    fcvt.l.s a0, fa0, rmm
-; RV64IDZFH-NEXT:    ret
-;
-; RV32I-LABEL: lround_f16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call lroundf@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: lround_f16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call lroundf@plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-  %1 = call iXLen @llvm.lround.iXLen.f16(float %a)
-  ret iXLen %1
-}
-
-declare i64 @llvm.llrint.i64.f16(float)
-
-define i64 @llrint_f16(float %a) nounwind {
-; RV32IZFH-LABEL: llrint_f16:
-; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call llrintf@plt
-; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFH-NEXT:    addi sp, sp, 16
-; RV32IZFH-NEXT:    ret
-;
-; RV64IZFH-LABEL: llrint_f16:
-; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fcvt.l.s a0, fa0
-; RV64IZFH-NEXT:    ret
-;
-; RV32IDZFH-LABEL: llrint_f16:
-; RV32IDZFH:       # %bb.0:
-; RV32IDZFH-NEXT:    addi sp, sp, -16
-; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call llrintf@plt
-; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IDZFH-NEXT:    addi sp, sp, 16
-; RV32IDZFH-NEXT:    ret
-;
-; RV64IDZFH-LABEL: llrint_f16:
-; RV64IDZFH:       # %bb.0:
-; RV64IDZFH-NEXT:    fcvt.l.s a0, fa0
-; RV64IDZFH-NEXT:    ret
-;
-; RV32I-LABEL: llrint_f16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llrintf@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: llrint_f16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llrintf@plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-  %1 = call i64 @llvm.llrint.i64.f16(float %a)
-  ret i64 %1
-}
-
-declare i64 @llvm.llround.i64.f16(float)
-
-define i64 @llround_f16(float %a) nounwind {
-; RV32IZFH-LABEL: llround_f16:
-; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    addi sp, sp, -16
-; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT:    call llroundf@plt
-; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IZFH-NEXT:    addi sp, sp, 16
-; RV32IZFH-NEXT:    ret
-;
-; RV64IZFH-LABEL: llround_f16:
-; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fcvt.l.s a0, fa0, rmm
-; RV64IZFH-NEXT:    ret
-;
-; RV32IDZFH-LABEL: llround_f16:
-; RV32IDZFH:       # %bb.0:
-; RV32IDZFH-NEXT:    addi sp, sp, -16
-; RV32IDZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT:    call llroundf@plt
-; RV32IDZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32IDZFH-NEXT:    addi sp, sp, 16
-; RV32IDZFH-NEXT:    ret
-;
-; RV64IDZFH-LABEL: llround_f16:
-; RV64IDZFH:       # %bb.0:
-; RV64IDZFH-NEXT:    fcvt.l.s a0, fa0, rmm
-; RV64IDZFH-NEXT:    ret
-;
-; RV32I-LABEL: llround_f16:
-; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi sp, sp, -16
-; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    call llroundf@plt
-; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT:    addi sp, sp, 16
-; RV32I-NEXT:    ret
-;
-; RV64I-LABEL: llround_f16:
-; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi sp, sp, -16
-; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    call llroundf@plt
-; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT:    addi sp, sp, 16
-; RV64I-NEXT:    ret
-  %1 = call i64 @llvm.llround.i64.f16(float %a)
-  ret i64 %1
-}

diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll
new file mode 100644
index 000000000000..4091c52d6c8a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+experimental-zfh \
+; RUN:   -verify-machineinstrs -target-abi lp64f | \
+; RUN:   FileCheck -check-prefix=RV64IZFH %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+d \
+; RUN:   -mattr=+experimental-zfh -verify-machineinstrs -target-abi lp64d | \
+; RUN:   FileCheck -check-prefix=RV64IDZFH %s
+
+; These intrinsics require half and i64 to be legal types.
+
+declare i64 @llvm.llrint.i64.f16(half)
+
+define i64 @llrint_f16(half %a) nounwind {
+; RV64IZFH-LABEL: llrint_f16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0
+; RV64IZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: llrint_f16:
+; RV64IDZFH:       # %bb.0:
+; RV64IDZFH-NEXT:    fcvt.l.h a0, fa0
+; RV64IDZFH-NEXT:    ret
+  %1 = call i64 @llvm.llrint.i64.f16(half %a)
+  ret i64 %1
+}
+
+declare i64 @llvm.llround.i64.f16(half)
+
+define i64 @llround_f16(half %a) nounwind {
+; RV64IZFH-LABEL: llround_f16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: llround_f16:
+; RV64IDZFH:       # %bb.0:
+; RV64IDZFH-NEXT:    fcvt.l.h a0, fa0, rmm
+; RV64IDZFH-NEXT:    ret
+  %1 = call i64 @llvm.llround.i64.f16(half %a)
+  ret i64 %1
+}

diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll
new file mode 100644
index 000000000000..f6f011ecfe8b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll
@@ -0,0 +1,67 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+experimental-zfh \
+; RUN:   -verify-machineinstrs -target-abi ilp32f | \
+; RUN:   FileCheck -check-prefix=RV32IZFH %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+experimental-zfh \
+; RUN:   -verify-machineinstrs -target-abi lp64f | \
+; RUN:   FileCheck -check-prefix=RV64IZFH %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
+; RUN:   -mattr=+experimental-zfh -verify-machineinstrs -target-abi ilp32d | \
+; RUN:   FileCheck -check-prefix=RV32IDZFH %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
+; RUN:   -mattr=+experimental-zfh -verify-machineinstrs -target-abi lp64d | \
+; RUN:   FileCheck -check-prefix=RV64IDZFH %s
+
+; These intrinsics require half to be a legal type.
+
+declare iXLen @llvm.lrint.iXLen.f16(half)
+
+define iXLen @lrint_f16(half %a) nounwind {
+; RV32IZFH-LABEL: lrint_f16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: lrint_f16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: lrint_f16:
+; RV32IDZFH:       # %bb.0:
+; RV32IDZFH-NEXT:    fcvt.w.h a0, fa0
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: lrint_f16:
+; RV64IDZFH:       # %bb.0:
+; RV64IDZFH-NEXT:    fcvt.l.h a0, fa0
+; RV64IDZFH-NEXT:    ret
+  %1 = call iXLen @llvm.lrint.iXLen.f16(half %a)
+  ret iXLen %1
+}
+
+declare iXLen @llvm.lround.iXLen.f16(half)
+
+define iXLen @lround_f16(half %a) nounwind {
+; RV32IZFH-LABEL: lround_f16:
+; RV32IZFH:       # %bb.0:
+; RV32IZFH-NEXT:    fcvt.w.h a0, fa0, rmm
+; RV32IZFH-NEXT:    ret
+;
+; RV64IZFH-LABEL: lround_f16:
+; RV64IZFH:       # %bb.0:
+; RV64IZFH-NEXT:    fcvt.l.h a0, fa0, rmm
+; RV64IZFH-NEXT:    ret
+;
+; RV32IDZFH-LABEL: lround_f16:
+; RV32IDZFH:       # %bb.0:
+; RV32IDZFH-NEXT:    fcvt.w.h a0, fa0, rmm
+; RV32IDZFH-NEXT:    ret
+;
+; RV64IDZFH-LABEL: lround_f16:
+; RV64IDZFH:       # %bb.0:
+; RV64IDZFH-NEXT:    fcvt.l.h a0, fa0, rmm
+; RV64IDZFH-NEXT:    ret
+  %1 = call iXLen @llvm.lround.iXLen.f16(half %a)
+  ret iXLen %1
+}


        


More information about the llvm-commits mailing list