[llvm] 7a76038 - CodeGen/RISCV: increase test coverage of lrint, llrint (#70826)
Author: Ramkumar Ramachandra
Date: 2023-10-31T19:16:39Z
New Revision: 7a7603883506e1b02be9da560edc6d75d440a1e9
URL: https://github.com/llvm/llvm-project/commit/7a7603883506e1b02be9da560edc6d75d440a1e9
DIFF: https://github.com/llvm/llvm-project/commit/7a7603883506e1b02be9da560edc6d75d440a1e9.diff
LOG: CodeGen/RISCV: increase test coverage of lrint, llrint (#70826)
To follow up on 98c90a1 (ISel: introduce vector ISD::LRINT, ISD::LLRINT;
custom RISCV lowering), extend the test coverage to exercise codegen of
the i32 variant of lrint on RV64, and of llrint on RV32.
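For context: lrint-sdnode.ll is templated on iXLen, and each RUN line
instantiates the template with sed before piping it to llc, so one file
covers both result widths. The substitution also rewrites the mangled
intrinsic names, since they contain the literal token iXLen; for
example, under sed 's/iXLen/i32/g' the line

  %a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f32(<vscale x 1 x float> %x)

becomes, for the new RV64-i32 run,

  %a = call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f32(<vscale x 1 x float> %x)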
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll
index 9a485a8b58be145..818abb9f4a00996 100644
--- a/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-sdnode.ll
@@ -1,4 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s
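Unlike the lrint test, this file is not templated: llrint always
returns i64, so the new riscv32 RUN line exercises i64-element results
on a 32-bit target. A sketch of the shape of the tests in this file
(the function name is illustrative and CHECK lines are elided, not the
verbatim file contents):

  ; Hypothetical test shape: scalable f32 vector rounded to i64 lanes.
  define <vscale x 1 x i64> @llrint_nxv1i64_nxv1f32(<vscale x 1 x float> %x) {
    %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
    ret <vscale x 1 x i64> %a
  }
  declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float>)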
diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll
index 61a5367b7fc5c9f..e75ea700df4f1a9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/lrint-sdnode.ll
@@ -1,8 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d \
; RUN: -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
+; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
-; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64
define <vscale x 1 x iXLen> @lrint_nxv1f32(<vscale x 1 x float> %x) {
; RV32-LABEL: lrint_nxv1f32:
@@ -11,12 +13,18 @@ define <vscale x 1 x iXLen> @lrint_nxv1f32(<vscale x 1 x float> %x) {
; RV32-NEXT: vfcvt.x.f.v v8, v8
; RV32-NEXT: ret
;
-; RV64-LABEL: lrint_nxv1f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v9, v8
-; RV64-NEXT: vmv1r.v v8, v9
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv1f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv1f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v9, v8
+; RV64-i64-NEXT: vmv1r.v v8, v9
+; RV64-i64-NEXT: ret
%a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f32(<vscale x 1 x float> %x)
ret <vscale x 1 x iXLen> %a
}
@@ -29,12 +37,18 @@ define <vscale x 2 x iXLen> @lrint_nxv2f32(<vscale x 2 x float> %x) {
; RV32-NEXT: vfcvt.x.f.v v8, v8
; RV32-NEXT: ret
;
-; RV64-LABEL: lrint_nxv2f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v10, v8
-; RV64-NEXT: vmv2r.v v8, v10
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv2f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv2f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v10, v8
+; RV64-i64-NEXT: vmv2r.v v8, v10
+; RV64-i64-NEXT: ret
%a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f32(<vscale x 2 x float> %x)
ret <vscale x 2 x iXLen> %a
}
@@ -47,12 +61,18 @@ define <vscale x 4 x iXLen> @lrint_nxv4f32(<vscale x 4 x float> %x) {
; RV32-NEXT: vfcvt.x.f.v v8, v8
; RV32-NEXT: ret
;
-; RV64-LABEL: lrint_nxv4f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v12, v8
-; RV64-NEXT: vmv4r.v v8, v12
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv4f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv4f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v12, v8
+; RV64-i64-NEXT: vmv4r.v v8, v12
+; RV64-i64-NEXT: ret
%a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f32(<vscale x 4 x float> %x)
ret <vscale x 4 x iXLen> %a
}
@@ -65,12 +85,18 @@ define <vscale x 8 x iXLen> @lrint_nxv8f32(<vscale x 8 x float> %x) {
; RV32-NEXT: vfcvt.x.f.v v8, v8
; RV32-NEXT: ret
;
-; RV64-LABEL: lrint_nxv8f32:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; RV64-NEXT: vfwcvt.x.f.v v16, v8
-; RV64-NEXT: vmv8r.v v8, v16
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv8f32:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv8f32:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV64-i64-NEXT: vfwcvt.x.f.v v16, v8
+; RV64-i64-NEXT: vmv8r.v v8, v16
+; RV64-i64-NEXT: ret
%a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f32(<vscale x 8 x float> %x)
ret <vscale x 8 x iXLen> %a
}
@@ -90,11 +116,18 @@ define <vscale x 1 x iXLen> @lrint_nxv1f64(<vscale x 1 x double> %x) {
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
-; RV64-LABEL: lrint_nxv1f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv1f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT: vfncvt.x.f.w v9, v8
+; RV64-i32-NEXT: vmv1r.v v8, v9
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv1f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-i64-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT: ret
%a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x iXLen> %a
}
@@ -108,11 +141,18 @@ define <vscale x 2 x iXLen> @lrint_nxv2f64(<vscale x 2 x double> %x) {
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
-; RV64-LABEL: lrint_nxv2f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv2f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-i32-NEXT: vfncvt.x.f.w v10, v8
+; RV64-i32-NEXT: vmv.v.v v8, v10
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv2f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64-i64-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT: ret
%a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x iXLen> %a
}
@@ -126,11 +166,18 @@ define <vscale x 4 x iXLen> @lrint_nxv4f64(<vscale x 4 x double> %x) {
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
-; RV64-LABEL: lrint_nxv4f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv4f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-i32-NEXT: vfncvt.x.f.w v12, v8
+; RV64-i32-NEXT: vmv.v.v v8, v12
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv4f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-i64-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT: ret
%a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x iXLen> %a
}
@@ -144,11 +191,18 @@ define <vscale x 8 x iXLen> @lrint_nxv8f64(<vscale x 8 x double> %x) {
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
-; RV64-LABEL: lrint_nxv8f64:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV64-NEXT: vfcvt.x.f.v v8, v8
-; RV64-NEXT: ret
+; RV64-i32-LABEL: lrint_nxv8f64:
+; RV64-i32: # %bb.0:
+; RV64-i32-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV64-i32-NEXT: vfncvt.x.f.w v16, v8
+; RV64-i32-NEXT: vmv.v.v v8, v16
+; RV64-i32-NEXT: ret
+;
+; RV64-i64-LABEL: lrint_nxv8f64:
+; RV64-i64: # %bb.0:
+; RV64-i64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64-i64-NEXT: vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT: ret
%a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x iXLen> %a
}
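Taken together, the new RV64-i32 checks show the expected conversion
pattern: f32 sources convert in place with vfcvt.x.f.v (source and
result elements are both 32 bits wide), f64 sources narrow with
vfncvt.x.f.w, and the pre-existing RV64-i64 checks widen f32 sources
with vfwcvt.x.f.v. As a concrete instance, after sed 's/iXLen/i32/g'
the nxv1f64 case above reads

  define <vscale x 1 x i32> @lrint_nxv1f64(<vscale x 1 x double> %x) {
    %a = call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f64(<vscale x 1 x double> %x)
    ret <vscale x 1 x i32> %a
  }

and lowers to the vfncvt.x.f.w sequence in the RV64-i32 block.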