[llvm] 555e030 - [RISCV] Match ext + ext + srem + trunc to vrem.vv

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 13 22:33:06 PDT 2023


Author: LWenH
Date: 2023-08-13T22:14:43-07:00
New Revision: 555e0305fda5a502f85045142fa2016743fd73f8

URL: https://github.com/llvm/llvm-project/commit/555e0305fda5a502f85045142fa2016743fd73f8
DIFF: https://github.com/llvm/llvm-project/commit/555e0305fda5a502f85045142fa2016743fd73f8.diff

LOG: [RISCV] Match ext + ext + srem + trunc to vrem.vv

This patch matches the SDNode pattern "trunc (srem (sext, sext))" to vrem.vv. This removes the extra "vsext", "vnsrl", and "vsetvli" instructions in cases like "c[i] = a[i] % b[i]", where the element types of all three arrays are int8_t or int16_t.
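
To illustrate, here is a minimal scalable-vector IR form of the pattern, mirroring the nxv1i8 test case updated below (the function name is illustrative):

    define <vscale x 1 x i8> @vrem_vv_sext_twice(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
      ; widen both operands, compute the remainder at twice the element width,
      ; then narrow the result back to the original element type
      %sext_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i16>
      %sext_vb = sext <vscale x 1 x i8> %vb to <vscale x 1 x i16>
      %rem = srem <vscale x 1 x i16> %sext_va, %sext_vb
      %trunc = trunc <vscale x 1 x i16> %rem to <vscale x 1 x i8>
      ret <vscale x 1 x i8> %trunc
    }

With this patch, the whole sequence selects to a single e8 vrem.vv instead of two vsext.vf2, a widened vrem.vv, a vsetvli toggle, and a vnsrl.wi.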

For unsigned element types like uint8_t or uint16_t, the redundant "zext + zext + urem + trunc" IR is already removed during the InstCombine pass, because urem cannot overflow in LLVM. For signed types, however, InstCombine cannot remove such patterns due to the potential for undefined behavior in LLVM IR. For example, -128 % -1 overflows and is undefined behavior for the i8 type in LLVM IR, but the same computation is well defined for i32. To avoid the UB, LLVM first sign extends the srem operands to i32.
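
As a rough sketch of the unsigned narrowing that InstCombine already performs (scalar IR for brevity):

    ; before InstCombine: widened urem as emitted for "c[i] = a[i] % b[i]" with uint8_t
    %zext_a = zext i8 %a to i32
    %zext_b = zext i8 %b to i32
    %rem    = urem i32 %zext_a, %zext_b
    %res    = trunc i32 %rem to i8
    ; after InstCombine: narrowed back to i8, since urem of zexts cannot overflow
    %res    = urem i8 %a, %b

The analogous narrowing of srem would be invalid in LLVM IR, because it could introduce the -128 % -1 overflow UB described above.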

For RVV, such overflow cases are fully defined by the specification and yield deterministic results even for extreme inputs. For example, per the spec, -128 % -1 for the i8 type produces 0 in the overflow case. It is therefore safe to match this pattern during instruction selection in the RVV backend, rather than removing it in a target-independent optimization pass like InstCombine.
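
For reference, the relevant rows of the spec's integer division/remainder table (SEW=8 shown):

    condition           dividend   divisor   vrem result
    division by zero        x         0          x
    signed overflow       -128       -1          0

So vrem.vv never traps and always produces a well-defined value.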

This patch only handles the sign_extend case for srem. For C test cases compared against GCC, see: https://gcc.godbolt.org/z/MWzE7WaT4

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D156685

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index be132e1f066158..20334ad8e4859a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1052,6 +1052,23 @@ defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV", isSEWAware=1>;
 defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU", isSEWAware=1>;
 defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM", isSEWAware=1>;
 
+foreach vtiTowti = AllWidenableIntVectors in {
+  defvar vti = vtiTowti.Vti;
+  defvar wti = vtiTowti.Wti;
+  let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+                               GetVTypePredicates<wti>.Predicates) in {
+  def : Pat<
+    (vti.Vector 
+      (riscv_trunc_vector_vl
+        (srem (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs1))),
+              (wti.Vector (sext_oneuse (vti.Vector vti.RegClass:$rs2)))),
+        (vti.Mask true_mask), (XLenVT srcvalue))),
+      (!cast<Instruction>("PseudoVREM_VV_"#vti.LMul.MX#"_E"#!shl(1, vti.Log2SEW))
+        (vti.Vector (IMPLICIT_DEF)), 
+        vti.RegClass:$rs1, vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+  }
+}
+
 // 11.12. Vector Widening Integer Multiply Instructions
 defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse,
                                    "PseudoVWMUL">;

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
index cc822bb2a325b4..d407cf43a4fc1b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
@@ -48,12 +48,8 @@ define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 define <vscale x 1 x i8> @vrem_vv_nxv1i8_sext_twice(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv1i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i16>
   %sext_vb = sext <vscale x 1 x i8> %vb to <vscale x 1 x i16>
@@ -106,12 +102,8 @@ define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 define <vscale x 2 x i8> @vrem_vv_nxv2i8_sext_twice(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv2i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 2 x i8> %va to <vscale x 2 x i16>
   %sext_vb = sext <vscale x 2 x i8> %vb to <vscale x 2 x i16>
@@ -164,12 +156,8 @@ define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 define <vscale x 4 x i8> @vrem_vv_nxv4i8_sext_twice(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv4i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 4 x i8> %va to <vscale x 4 x i16>
   %sext_vb = sext <vscale x 4 x i8> %vb to <vscale x 4 x i16>
@@ -222,12 +210,8 @@ define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 define <vscale x 8 x i8> @vrem_vv_nxv8i8_sext_twice(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv8i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vrem.vv v10, v10, v12
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 8 x i8> %va to <vscale x 8 x i16>
   %sext_vb = sext <vscale x 8 x i8> %vb to <vscale x 8 x i16>
@@ -280,12 +264,8 @@ define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 define <vscale x 16 x i8> @vrem_vv_nxv16i8_sext_twice(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv16i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vsext.vf2 v12, v8
-; CHECK-NEXT:    vsext.vf2 v16, v10
-; CHECK-NEXT:    vrem.vv v12, v12, v16
-; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 16 x i8> %va to <vscale x 16 x i16>
   %sext_vb = sext <vscale x 16 x i8> %vb to <vscale x 16 x i16>
@@ -338,12 +318,8 @@ define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 define <vscale x 32 x i8> @vrem_vv_nxv32i8_sext_twice(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv32i8_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vsext.vf2 v16, v8
-; CHECK-NEXT:    vsext.vf2 v24, v12
-; CHECK-NEXT:    vrem.vv v16, v16, v24
-; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 32 x i8> %va to <vscale x 32 x i16>
   %sext_vb = sext <vscale x 32 x i8> %vb to <vscale x 32 x i16>
@@ -450,12 +426,8 @@ define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 define <vscale x 1 x i16> @vrem_vv_nxv1i16_sext_twice(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv1i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
   %sext_vb = sext <vscale x 1 x i16> %vb to <vscale x 1 x i32>
@@ -521,12 +493,8 @@ define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 define <vscale x 2 x i16> @vrem_vv_nxv2i16_sext_twice(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv2i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v8, v9
-; CHECK-NEXT:    vrem.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
   %sext_vb = sext <vscale x 2 x i16> %vb to <vscale x 2 x i32>
@@ -592,12 +560,8 @@ define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 define <vscale x 4 x i16> @vrem_vv_nxv4i16_sext_twice(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv4i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vrem.vv v10, v10, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
   %sext_vb = sext <vscale x 4 x i16> %vb to <vscale x 4 x i32>
@@ -663,12 +627,8 @@ define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 define <vscale x 8 x i16> @vrem_vv_nxv8i16_sext_twice(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv8i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vsext.vf2 v12, v8
-; CHECK-NEXT:    vsext.vf2 v16, v10
-; CHECK-NEXT:    vrem.vv v12, v12, v16
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v10
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
   %sext_vb = sext <vscale x 8 x i16> %vb to <vscale x 8 x i32>
@@ -734,12 +694,8 @@ define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 define <vscale x 16 x i16> @vrem_vv_nxv16i16_sext_twice(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv16i16_sext_twice:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vsext.vf2 v16, v8
-; CHECK-NEXT:    vsext.vf2 v24, v12
-; CHECK-NEXT:    vrem.vv v16, v16, v24
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vrem.vv v8, v8, v12
 ; CHECK-NEXT:    ret
   %sext_va = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
   %sext_vb = sext <vscale x 16 x i16> %vb to <vscale x 16 x i32>



