[llvm] 6cb55a3 - [RISCV] Add Precommit test for D156685

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 13 22:33:04 PDT 2023


Author: LWenH
Date: 2023-08-13T22:14:38-07:00
New Revision: 6cb55a3d9a7a8d749f446fd7368f35e52a8b461f

URL: https://github.com/llvm/llvm-project/commit/6cb55a3d9a7a8d749f446fd7368f35e52a8b461f
DIFF: https://github.com/llvm/llvm-project/commit/6cb55a3d9a7a8d749f446fd7368f35e52a8b461f.diff

LOG: [RISCV] Add Precommit test for D156685

Add baseline test for [[ https://reviews.llvm.org/D156685 | D156685 ]].

In LLVM, such a signed 8-bit remainder operation is first sign-extended to 32 bits; the CorrelatedValuePropagation pass may then narrow the operands to a smaller type, such as 16 bits, to reduce the final data size.

The sign extension around srem exists to avoid undefined behavior. For example, -128 % -1 is undefined behavior for the i8 type in LLVM IR, but not for i32, so the pattern cannot be eliminated by the target-independent InstCombine pass. During RVV code generation these sext/trunc operations are lowered one by one, and redundant vsetvli instructions are inserted.
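For illustration only (a minimal scalar sketch, not part of this commit), the resulting IR pattern looks like the following; the new tests below exercise the equivalent scalable-vector form:

  define i8 @srem_i8_widened(i8 %a, i8 %b) {
    %ea = sext i8 %a to i16      ; widened operands cannot hit the i8 overflow case
    %eb = sext i8 %b to i16
    %r  = srem i16 %ea, %eb
    %t  = trunc i16 %r to i8     ; narrow the result back to the original type
    ret i8 %t
  }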

In fact, the RVV specification already defines the result of this kind of overflowing operation for vrem.vv: -128 % -1 produces 0 according to the RISC-V spec. The follow-up patch can therefore remove the redundant RVV code through SDNode pattern matching at the instruction selection phase.
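As a hypothetical sketch of what that follow-up could emit (illustration only; the actual output is determined by D156685, not by this precommit), the first test below could then be selected as a single e8 vrem.vv with no widening:

  vsetvli a0, zero, e8, mf8, ta, ma
  vrem.vv v8, v8, v9
  ret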

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D157592

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
index 19aea69c0a32f1..cc822bb2a325b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
@@ -45,6 +45,23 @@ define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
   ret <vscale x 1 x i8> %vc
 }
 
+define <vscale x 1 x i8> @vrem_vv_nxv1i8_sext_twice(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv1i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v8, v9
+; CHECK-NEXT:    vrem.vv v8, v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i16>
+  %sext_vb = sext <vscale x 1 x i8> %vb to <vscale x 1 x i16>
+  %vc_ext = srem <vscale x 1 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 1 x i16> %vc_ext to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vrem_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv2i8:
 ; CHECK:       # %bb.0:
@@ -86,6 +103,23 @@ define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
   ret <vscale x 2 x i8> %vc
 }
 
+define <vscale x 2 x i8> @vrem_vv_nxv2i8_sext_twice(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv2i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v8, v9
+; CHECK-NEXT:    vrem.vv v8, v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 2 x i8> %va to <vscale x 2 x i16>
+  %sext_vb = sext <vscale x 2 x i8> %vb to <vscale x 2 x i16>
+  %vc_ext = srem <vscale x 2 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 2 x i16> %vc_ext to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %vc
+}
+
 define <vscale x 4 x i8> @vrem_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv4i8:
 ; CHECK:       # %bb.0:
@@ -127,6 +161,23 @@ define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
   ret <vscale x 4 x i8> %vc
 }
 
+define <vscale x 4 x i8> @vrem_vv_nxv4i8_sext_twice(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv4i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v8, v9
+; CHECK-NEXT:    vrem.vv v8, v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 4 x i8> %va to <vscale x 4 x i16>
+  %sext_vb = sext <vscale x 4 x i8> %vb to <vscale x 4 x i16>
+  %vc_ext = srem <vscale x 4 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 4 x i16> %vc_ext to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %vc
+}
+
 define <vscale x 8 x i8> @vrem_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv8i8:
 ; CHECK:       # %bb.0:
@@ -168,6 +219,23 @@ define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
   ret <vscale x 8 x i8> %vc
 }
 
+define <vscale x 8 x i8> @vrem_vv_nxv8i8_sext_twice(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv8i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v12, v9
+; CHECK-NEXT:    vrem.vv v10, v10, v12
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 8 x i8> %va to <vscale x 8 x i16>
+  %sext_vb = sext <vscale x 8 x i8> %vb to <vscale x 8 x i16>
+  %vc_ext = srem <vscale x 8 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 8 x i16> %vc_ext to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %vc
+}
+
 define <vscale x 16 x i8> @vrem_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv16i8:
 ; CHECK:       # %bb.0:
@@ -209,6 +277,23 @@ define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
   ret <vscale x 16 x i8> %vc
 }
 
+define <vscale x 16 x i8> @vrem_vv_nxv16i8_sext_twice(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv16i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v12, v8
+; CHECK-NEXT:    vsext.vf2 v16, v10
+; CHECK-NEXT:    vrem.vv v12, v12, v16
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 16 x i8> %va to <vscale x 16 x i16>
+  %sext_vb = sext <vscale x 16 x i8> %vb to <vscale x 16 x i16>
+  %vc_ext = srem <vscale x 16 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 16 x i16> %vc_ext to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %vc
+}
+
 define <vscale x 32 x i8> @vrem_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv32i8:
 ; CHECK:       # %bb.0:
@@ -250,6 +335,23 @@ define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
   ret <vscale x 32 x i8> %vc
 }
 
+define <vscale x 32 x i8> @vrem_vv_nxv32i8_sext_twice(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
+; CHECK-LABEL: vrem_vv_nxv32i8_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vsext.vf2 v16, v8
+; CHECK-NEXT:    vsext.vf2 v24, v12
+; CHECK-NEXT:    vrem.vv v16, v16, v24
+; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 32 x i8> %va to <vscale x 32 x i16>
+  %sext_vb = sext <vscale x 32 x i8> %vb to <vscale x 32 x i16>
+  %vc_ext = srem <vscale x 32 x i16> %sext_va, %sext_vb
+  %vc = trunc <vscale x 32 x i16> %vc_ext to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %vc
+}
+
 define <vscale x 64 x i8> @vrem_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv64i8:
 ; CHECK:       # %bb.0:
@@ -345,6 +447,23 @@ define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
   ret <vscale x 1 x i16> %vc
 }
 
+define <vscale x 1 x i16> @vrem_vv_nxv1i16_sext_twice(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv1i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v8, v9
+; CHECK-NEXT:    vrem.vv v8, v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
+  %sext_vb = sext <vscale x 1 x i16> %vb to <vscale x 1 x i32>
+  %vc_ext = srem <vscale x 1 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 1 x i32> %vc_ext to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %vc
+}
+
 define <vscale x 2 x i16> @vrem_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv2i16:
 ; CHECK:       # %bb.0:
@@ -399,6 +518,23 @@ define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
   ret <vscale x 2 x i16> %vc
 }
 
+define <vscale x 2 x i16> @vrem_vv_nxv2i16_sext_twice(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv2i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v8, v9
+; CHECK-NEXT:    vrem.vv v8, v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
+  %sext_vb = sext <vscale x 2 x i16> %vb to <vscale x 2 x i32>
+  %vc_ext = srem <vscale x 2 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 2 x i32> %vc_ext to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %vc
+}
+
 define <vscale x 4 x i16> @vrem_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv4i16:
 ; CHECK:       # %bb.0:
@@ -453,6 +589,23 @@ define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
   ret <vscale x 4 x i16> %vc
 }
 
+define <vscale x 4 x i16> @vrem_vv_nxv4i16_sext_twice(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv4i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v12, v9
+; CHECK-NEXT:    vrem.vv v10, v10, v12
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
+  %sext_vb = sext <vscale x 4 x i16> %vb to <vscale x 4 x i32>
+  %vc_ext = srem <vscale x 4 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 4 x i32> %vc_ext to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %vc
+}
+
 define <vscale x 8 x i16> @vrem_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv8i16:
 ; CHECK:       # %bb.0:
@@ -507,6 +660,23 @@ define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
   ret <vscale x 8 x i16> %vc
 }
 
+define <vscale x 8 x i16> @vrem_vv_nxv8i16_sext_twice(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv8i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v12, v8
+; CHECK-NEXT:    vsext.vf2 v16, v10
+; CHECK-NEXT:    vrem.vv v12, v12, v16
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
+  %sext_vb = sext <vscale x 8 x i16> %vb to <vscale x 8 x i32>
+  %vc_ext = srem <vscale x 8 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 8 x i32> %vc_ext to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %vc
+}
+
 define <vscale x 16 x i16> @vrem_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv16i16:
 ; CHECK:       # %bb.0:
@@ -561,6 +731,23 @@ define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
   ret <vscale x 16 x i16> %vc
 }
 
+define <vscale x 16 x i16> @vrem_vv_nxv16i16_sext_twice(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vrem_vv_nxv16i16_sext_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vsext.vf2 v16, v8
+; CHECK-NEXT:    vsext.vf2 v24, v12
+; CHECK-NEXT:    vrem.vv v16, v16, v24
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %sext_va = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
+  %sext_vb = sext <vscale x 16 x i16> %vb to <vscale x 16 x i32>
+  %vc_ext = srem <vscale x 16 x i32> %sext_va, %sext_vb
+  %vc = trunc <vscale x 16 x i32> %vc_ext to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %vc
+}
+
 define <vscale x 32 x i16> @vrem_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv32i16:
 ; CHECK:       # %bb.0:
@@ -963,8 +1150,8 @@ define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ;
 ; RV64-V-LABEL: vrem_vi_nxv1i64_0:
 ; RV64-V:       # %bb.0:
-; RV64-V-NEXT:    lui a0, %hi(.LCPI56_0)
-; RV64-V-NEXT:    ld a0, %lo(.LCPI56_0)(a0)
+; RV64-V-NEXT:    lui a0, %hi(.LCPI67_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI67_0)(a0)
 ; RV64-V-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; RV64-V-NEXT:    vmulh.vx v9, v8, a0
 ; RV64-V-NEXT:    li a0, 63
@@ -1048,8 +1235,8 @@ define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ;
 ; RV64-V-LABEL: vrem_vi_nxv2i64_0:
 ; RV64-V:       # %bb.0:
-; RV64-V-NEXT:    lui a0, %hi(.LCPI59_0)
-; RV64-V-NEXT:    ld a0, %lo(.LCPI59_0)(a0)
+; RV64-V-NEXT:    lui a0, %hi(.LCPI70_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI70_0)(a0)
 ; RV64-V-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; RV64-V-NEXT:    vmulh.vx v10, v8, a0
 ; RV64-V-NEXT:    li a0, 63
@@ -1133,8 +1320,8 @@ define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ;
 ; RV64-V-LABEL: vrem_vi_nxv4i64_0:
 ; RV64-V:       # %bb.0:
-; RV64-V-NEXT:    lui a0, %hi(.LCPI62_0)
-; RV64-V-NEXT:    ld a0, %lo(.LCPI62_0)(a0)
+; RV64-V-NEXT:    lui a0, %hi(.LCPI73_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI73_0)(a0)
 ; RV64-V-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; RV64-V-NEXT:    vmulh.vx v12, v8, a0
 ; RV64-V-NEXT:    li a0, 63
@@ -1218,8 +1405,8 @@ define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ;
 ; RV64-V-LABEL: vrem_vi_nxv8i64_0:
 ; RV64-V:       # %bb.0:
-; RV64-V-NEXT:    lui a0, %hi(.LCPI65_0)
-; RV64-V-NEXT:    ld a0, %lo(.LCPI65_0)(a0)
+; RV64-V-NEXT:    lui a0, %hi(.LCPI76_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI76_0)(a0)
 ; RV64-V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; RV64-V-NEXT:    vmulh.vx v16, v8, a0
 ; RV64-V-NEXT:    li a0, 63


        

