[llvm] [RISCV] Combine trunc (srl zext (x), zext (y)) to srl (x, umin (y, scalarsizeinbits(y) - 1)) (PR #69092)

via llvm-commits llvm-commits at lists.llvm.org
Sat Oct 14 23:40:45 PDT 2023


https://github.com/LWenH created https://github.com/llvm/llvm-project/pull/69092

Like #65728, for i8/i16 element-wise vector logical right shifts, the source value and the shift amount
are first **zero-extended** to i32, the shift is performed as an i32 **vsrl**, and the result is then
**truncated** back to the original element type to obtain the final value. The truncation is later
expanded into a series of "vsetvli" and "vnsrl" instructions. For RVV, the **vsrl** instruction only
uses the low **lg2(SEW)** bits of each shift-amount element, so we can instead shift at the original
element width and obtain the shift amount with umin(Y, scalarsize(Y) - 1).
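
Sketched below as scalar C++ standing in for a single i8 lane (the function names are illustrative, not part of the patch), the combine maps the first form to the second:

```cpp
#include <algorithm>
#include <cstdint>

// Per-lane semantics of the current pattern for an i8 element:
// trunc(i32 -> i8)( zext(i8 -> i32)(x) >> zext(i8 -> i32)(y) )
uint8_t lshrWidened(uint8_t x, uint8_t y) {
  return static_cast<uint8_t>(static_cast<uint32_t>(x) >> y);
}

// Per-lane semantics of the combined form: shift at the element width,
// with the amount clamped to scalarsize - 1, i.e. umin(y, 7) for i8.
uint8_t lshrClamped(uint8_t x, uint8_t y) {
  return static_cast<uint8_t>(x >> std::min<uint8_t>(y, 7));
}
```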

From 745368f5d7e7975b19e1861061fca111d93bb459 Mon Sep 17 00:00:00 2001
From: LWenH <924105575 at qq.com>
Date: Sun, 15 Oct 2023 11:13:46 +0800
Subject: [PATCH 1/2] [RISCV] Add pre-commit tests for later trunc + srl
 combine

---
 llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll | 170 +++++++++++++++++++++
 1 file changed, 170 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
index be70b20181b1484..1807d42769b4e62 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
@@ -26,6 +26,25 @@ define <vscale x 1 x i8> @vsrl_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
   ret <vscale x 1 x i8> %vc
 }
 
+define <vscale x 1 x i8> @vsrl_vv_nxv1i8_zext_zext(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv1i8_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vzext.vf4 v10, v8
+; CHECK-NEXT:    vzext.vf4 v8, v9
+; CHECK-NEXT:    vsrl.vv v8, v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 1 x i8> %va to <vscale x 1 x i32>
+  %zexted_vb = zext <vscale x 1 x i8> %vb to <vscale x 1 x i32>
+  %expand = lshr <vscale x 1 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 1 x i32> %expand to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %vc
+}
+
 define <vscale x 2 x i8> @vsrl_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv2i8:
 ; CHECK:       # %bb.0:
@@ -50,6 +69,25 @@ define <vscale x 2 x i8> @vsrl_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
   ret <vscale x 2 x i8> %vc
 }
 
+define <vscale x 2 x i8> @vsrl_vv_nxv2i8_zext_zext(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv2i8_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vzext.vf4 v10, v8
+; CHECK-NEXT:    vzext.vf4 v8, v9
+; CHECK-NEXT:    vsrl.vv v8, v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 2 x i8> %va to <vscale x 2 x i32>
+  %zexted_vb = zext <vscale x 2 x i8> %vb to <vscale x 2 x i32>
+  %expand = lshr <vscale x 2 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 2 x i32> %expand to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %vc
+}
+
 define <vscale x 4 x i8> @vsrl_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv4i8:
 ; CHECK:       # %bb.0:
@@ -74,6 +112,25 @@ define <vscale x 4 x i8> @vsrl_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
   ret <vscale x 4 x i8> %vc
 }
 
+define <vscale x 4 x i8> @vsrl_vv_nxv4i8_zext_zext(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv4i8_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf4 v10, v8
+; CHECK-NEXT:    vzext.vf4 v12, v9
+; CHECK-NEXT:    vsrl.vv v8, v10, v12
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 4 x i8> %va to <vscale x 4 x i32>
+  %zexted_vb = zext <vscale x 4 x i8> %vb to <vscale x 4 x i32>
+  %expand = lshr <vscale x 4 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 4 x i32> %expand to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %vc
+}
+
 define <vscale x 8 x i8> @vsrl_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv8i8:
 ; CHECK:       # %bb.0:
@@ -98,6 +155,25 @@ define <vscale x 8 x i8> @vsrl_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
   ret <vscale x 8 x i8> %vc
 }
 
+define <vscale x 8 x i8> @vsrl_vv_nxv8i8_zext_zext(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv8i8_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vzext.vf4 v16, v9
+; CHECK-NEXT:    vsrl.vv v8, v12, v16
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 8 x i8> %va to <vscale x 8 x i32>
+  %zexted_vb = zext <vscale x 8 x i8> %vb to <vscale x 8 x i32>
+  %expand = lshr <vscale x 8 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 8 x i32> %expand to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %vc
+}
+
 define <vscale x 16 x i8> @vsrl_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv16i8:
 ; CHECK:       # %bb.0:
@@ -122,6 +198,25 @@ define <vscale x 16 x i8> @vsrl_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
   ret <vscale x 16 x i8> %vc
 }
 
+define <vscale x 16 x i8> @vsrl_vv_nxv16i8_zext_zext(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv16i8_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vzext.vf4 v16, v8
+; CHECK-NEXT:    vzext.vf4 v24, v10
+; CHECK-NEXT:    vsrl.vv v8, v16, v24
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v16, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 16 x i8> %va to <vscale x 16 x i32>
+  %zexted_vb = zext <vscale x 16 x i8> %vb to <vscale x 16 x i32>
+  %expand = lshr <vscale x 16 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 16 x i32> %expand to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %vc
+}
+
 define <vscale x 32 x i8> @vsrl_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv32i8:
 ; CHECK:       # %bb.0:
@@ -194,6 +289,21 @@ define <vscale x 1 x i16> @vsrl_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
   ret <vscale x 1 x i16> %vc
 }
 
+define <vscale x 1 x i16> @vsrl_vv_nxv1i16_zext_zext(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv1i16_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wv v8, v10, v9
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 1 x i16> %va to <vscale x 1 x i32>
+  %zexted_vb = zext <vscale x 1 x i16> %vb to <vscale x 1 x i32>
+  %expand = lshr <vscale x 1 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 1 x i32> %expand to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %vc
+}
+
 define <vscale x 2 x i16> @vsrl_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv2i16:
 ; CHECK:       # %bb.0:
@@ -218,6 +328,21 @@ define <vscale x 2 x i16> @vsrl_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
   ret <vscale x 2 x i16> %vc
 }
 
+define <vscale x 2 x i16> @vsrl_vv_nxv2i16_zext_zext(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv2i16_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wv v8, v10, v9
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 2 x i16> %va to <vscale x 2 x i32>
+  %zexted_vb = zext <vscale x 2 x i16> %vb to <vscale x 2 x i32>
+  %expand = lshr <vscale x 2 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 2 x i32> %expand to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %vc
+}
+
 define <vscale x 4 x i16> @vsrl_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv4i16:
 ; CHECK:       # %bb.0:
@@ -242,6 +367,21 @@ define <vscale x 4 x i16> @vsrl_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
   ret <vscale x 4 x i16> %vc
 }
 
+define <vscale x 4 x i16> @vsrl_vv_nxv4i16_zext_zext(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv4i16_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wv v8, v10, v9
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 4 x i16> %va to <vscale x 4 x i32>
+  %zexted_vb = zext <vscale x 4 x i16> %vb to <vscale x 4 x i32>
+  %expand = lshr <vscale x 4 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 4 x i32> %expand to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %vc
+}
+
 define <vscale x 8 x i16> @vsrl_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv8i16:
 ; CHECK:       # %bb.0:
@@ -266,6 +406,21 @@ define <vscale x 8 x i16> @vsrl_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
   ret <vscale x 8 x i16> %vc
 }
 
+define <vscale x 8 x i16> @vsrl_vv_nxv8i16_zext_zext(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv8i16_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v12, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wv v8, v12, v10
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 8 x i16> %va to <vscale x 8 x i32>
+  %zexted_vb = zext <vscale x 8 x i16> %vb to <vscale x 8 x i32>
+  %expand = lshr <vscale x 8 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 8 x i32> %expand to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %vc
+}
+
 define <vscale x 16 x i16> @vsrl_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv16i16:
 ; CHECK:       # %bb.0:
@@ -290,6 +445,21 @@ define <vscale x 16 x i16> @vsrl_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
   ret <vscale x 16 x i16> %vc
 }
 
+define <vscale x 16 x i16> @vsrl_vv_nxv16i16_zext_zext(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
+; CHECK-LABEL: vsrl_vv_nxv16i16_zext_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vzext.vf2 v16, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wv v8, v16, v12
+; CHECK-NEXT:    ret
+  %zexted_va = zext <vscale x 16 x i16> %va to <vscale x 16 x i32>
+  %zexted_vb = zext <vscale x 16 x i16> %vb to <vscale x 16 x i32>
+  %expand = lshr <vscale x 16 x i32> %zexted_va, %zexted_vb
+  %vc = trunc <vscale x 16 x i32> %expand to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %vc
+}
+
 define <vscale x 32 x i16> @vsrl_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
 ; CHECK-LABEL: vsrl_vx_nxv32i16:
 ; CHECK:       # %bb.0:

From a3f248439f48e7f01bdcc8e0248f9daf46c431ad Mon Sep 17 00:00:00 2001
From: LWenH <924105575 at qq.com>
Date: Sun, 15 Oct 2023 13:45:54 +0800
Subject: [PATCH 2/2] [RISCV] Combine trunc (srl zext (X), zext (Y)) to srl (X,
 umin (Y, scalarsize(Y) - 1))

Like #65728, for i8/i16 element-wise vector logical right shifts,
the source value and the shift amount are first zero-extended to
i32, the shift is performed as an i32 vsrl, and the result is then
truncated back to the original element type. The truncation is
later expanded into a series of "vsetvli" and "vnsrl" instructions.
For RVV, the vsrl instruction only uses the low lg2(SEW) bits of
each shift-amount element, so we can instead shift at the original
element width and obtain the shift amount with
umin(Y, scalarsize(Y) - 1).
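
As a quick sanity sketch of the i16 case, the standalone harness below
(names and harness are illustrative only, not part of the patch) compares
the two per-lane forms for every shift amount below the element width:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Per-lane semantics before the combine (i16 element):
// trunc(i32 -> i16)( zext(i16 -> i32)(x) >> zext(i16 -> i32)(y) )
static uint16_t beforeCombine(uint16_t x, uint16_t y) {
  return static_cast<uint16_t>(static_cast<uint32_t>(x) >> y);
}

// Per-lane semantics after the combine: clamp to umin(y, 15), shift at i16.
static uint16_t afterCombine(uint16_t x, uint16_t y) {
  return static_cast<uint16_t>(x >> std::min<uint16_t>(y, 15));
}

int main() {
  // Compare both forms on one lane for all in-range shift amounts.
  for (uint16_t y = 0; y < 16; ++y)
    printf("y=%2u  before=%04x  after=%04x\n", (unsigned)y,
           (unsigned)beforeCombine(0x8421, y),
           (unsigned)afterCombine(0x8421, y));
  return 0;
}
```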
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp |  21 ++++
 llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll  | 100 ++++++++------------
 2 files changed, 61 insertions(+), 60 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d7552317fd8bc69..036e5655a2984cc 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14303,6 +14303,27 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
         }
       }
     }
+
+    // Similarly, we can also optimize away the zext nodes feeding the srl:
+    // trunc (srl zext (X), zext (Y)) -> srl (X, umin (Y, scalarsize(Y) - 1))
+    if (Op.getOpcode() == ISD::SRL && Op.hasOneUse()) {
+      SDValue N0 = Op.getOperand(0);
+      SDValue N1 = Op.getOperand(1);
+      if (N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
+          N1.getOpcode() == ISD::ZERO_EXTEND && N1.hasOneUse()) {
+        SDValue N00 = N0.getOperand(0);
+        SDValue N10 = N1.getOperand(0);
+        if (N00.getValueType().isVector() &&
+            N00.getValueType() == N10.getValueType() &&
+            N->getValueType(0) == N10.getValueType()) {
+          unsigned MaxShAmt = N10.getValueType().getScalarSizeInBits() - 1;
+          SDValue UMin = DAG.getNode(
+              ISD::UMIN, SDLoc(N1), N->getValueType(0), N10,
+              DAG.getConstant(MaxShAmt, SDLoc(N1), N->getValueType(0)));
+          return DAG.getNode(ISD::SRL, SDLoc(N), N->getValueType(0), N00, UMin);
+        }
+      }
+    }
     break;
   }
   case ISD::TRUNCATE:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
index 1807d42769b4e62..8b2201e147ffe1e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode.ll
@@ -29,14 +29,10 @@ define <vscale x 1 x i8> @vsrl_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
 define <vscale x 1 x i8> @vsrl_vv_nxv1i8_zext_zext(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv1i8_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vzext.vf4 v10, v8
-; CHECK-NEXT:    vzext.vf4 v8, v9
-; CHECK-NEXT:    vsrl.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vminu.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 1 x i8> %va to <vscale x 1 x i32>
   %zexted_vb = zext <vscale x 1 x i8> %vb to <vscale x 1 x i32>
@@ -72,14 +68,10 @@ define <vscale x 2 x i8> @vsrl_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
 define <vscale x 2 x i8> @vsrl_vv_nxv2i8_zext_zext(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv2i8_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vzext.vf4 v10, v8
-; CHECK-NEXT:    vzext.vf4 v8, v9
-; CHECK-NEXT:    vsrl.vv v8, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vminu.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 2 x i8> %va to <vscale x 2 x i32>
   %zexted_vb = zext <vscale x 2 x i8> %vb to <vscale x 2 x i32>
@@ -115,14 +107,10 @@ define <vscale x 4 x i8> @vsrl_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
 define <vscale x 4 x i8> @vsrl_vv_nxv4i8_zext_zext(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv4i8_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vzext.vf4 v10, v8
-; CHECK-NEXT:    vzext.vf4 v12, v9
-; CHECK-NEXT:    vsrl.vv v8, v10, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vnsrl.wi v10, v8, 0
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vminu.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 4 x i8> %va to <vscale x 4 x i32>
   %zexted_vb = zext <vscale x 4 x i8> %vb to <vscale x 4 x i32>
@@ -158,14 +146,10 @@ define <vscale x 8 x i8> @vsrl_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
 define <vscale x 8 x i8> @vsrl_vv_nxv8i8_zext_zext(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv8i8_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vzext.vf4 v12, v8
-; CHECK-NEXT:    vzext.vf4 v16, v9
-; CHECK-NEXT:    vsrl.vv v8, v12, v16
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v12, v8, 0
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vminu.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 8 x i8> %va to <vscale x 8 x i32>
   %zexted_vb = zext <vscale x 8 x i8> %vb to <vscale x 8 x i32>
@@ -201,14 +185,10 @@ define <vscale x 16 x i8> @vsrl_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
 define <vscale x 16 x i8> @vsrl_vv_nxv16i8_zext_zext(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv16i8_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vzext.vf4 v16, v8
-; CHECK-NEXT:    vzext.vf4 v24, v10
-; CHECK-NEXT:    vsrl.vv v8, v16, v24
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v16, v8, 0
-; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vminu.vx v10, v10, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 16 x i8> %va to <vscale x 16 x i32>
   %zexted_vb = zext <vscale x 16 x i8> %vb to <vscale x 16 x i32>
@@ -292,10 +272,10 @@ define <vscale x 1 x i16> @vsrl_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
 define <vscale x 1 x i16> @vsrl_vv_nxv1i16_zext_zext(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv1i16_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vzext.vf2 v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vnsrl.wv v8, v10, v9
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vminu.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 1 x i16> %va to <vscale x 1 x i32>
   %zexted_vb = zext <vscale x 1 x i16> %vb to <vscale x 1 x i32>
@@ -331,10 +311,10 @@ define <vscale x 2 x i16> @vsrl_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
 define <vscale x 2 x i16> @vsrl_vv_nxv2i16_zext_zext(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv2i16_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vzext.vf2 v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vnsrl.wv v8, v10, v9
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vminu.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 2 x i16> %va to <vscale x 2 x i32>
   %zexted_vb = zext <vscale x 2 x i16> %vb to <vscale x 2 x i32>
@@ -370,10 +350,10 @@ define <vscale x 4 x i16> @vsrl_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
 define <vscale x 4 x i16> @vsrl_vv_nxv4i16_zext_zext(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv4i16_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vzext.vf2 v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vnsrl.wv v8, v10, v9
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vminu.vx v9, v9, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 4 x i16> %va to <vscale x 4 x i32>
   %zexted_vb = zext <vscale x 4 x i16> %vb to <vscale x 4 x i32>
@@ -409,10 +389,10 @@ define <vscale x 8 x i16> @vsrl_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
 define <vscale x 8 x i16> @vsrl_vv_nxv8i16_zext_zext(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv8i16_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vzext.vf2 v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vnsrl.wv v8, v12, v10
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vminu.vx v10, v10, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 8 x i16> %va to <vscale x 8 x i32>
   %zexted_vb = zext <vscale x 8 x i16> %vb to <vscale x 8 x i32>
@@ -448,10 +428,10 @@ define <vscale x 16 x i16> @vsrl_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
 define <vscale x 16 x i16> @vsrl_vv_nxv16i16_zext_zext(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
 ; CHECK-LABEL: vsrl_vv_nxv16i16_zext_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vzext.vf2 v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vnsrl.wv v8, v16, v12
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vminu.vx v12, v12, a0
+; CHECK-NEXT:    vsrl.vv v8, v8, v12
 ; CHECK-NEXT:    ret
   %zexted_va = zext <vscale x 16 x i16> %va to <vscale x 16 x i32>
   %zexted_vb = zext <vscale x 16 x i16> %vb to <vscale x 16 x i32>


