[llvm] 8e4909a - [RISCV] Remove unnecessary vand.vi from vXi1 and nxvXi1 VECTOR_REVERSE codegen. (#109071)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 18 09:43:51 PDT 2024
Author: Craig Topper
Date: 2024-09-18T09:43:48-07:00
New Revision: 8e4909aa198d8beaf32ee0abc59a06e2e54dc3bd
URL: https://github.com/llvm/llvm-project/commit/8e4909aa198d8beaf32ee0abc59a06e2e54dc3bd
DIFF: https://github.com/llvm/llvm-project/commit/8e4909aa198d8beaf32ee0abc59a06e2e54dc3bd.diff
LOG: [RISCV] Remove unnecessary vand.vi from vXi1 and nxvXi1 VECTOR_REVERSE codegen. (#109071)
Use a setne with 0 instead of a trunc. We know we zero extended the node
so we can get by with a non-zero check only. The truncate lowering
doesn't know that we zero extended so has to mask the lsb.
I don't think DAG combine sees the trunc before we lower it to RISCVISD
nodes so we don't get a chance to use computeKnownBits to remove the
AND.
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0f76ad6c5e9288..189fb741f34cd1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -10775,7 +10775,8 @@ SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
MVT WidenVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WidenVT, Op.getOperand(0));
SDValue Op2 = DAG.getNode(ISD::VECTOR_REVERSE, DL, WidenVT, Op1);
- return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
+ return DAG.getSetCC(DL, VecVT, Op2,
+ DAG.getConstant(0, DL, Op2.getValueType()), ISD::SETNE);
}
MVT ContainerVT = VecVT;
diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
index 2a915529e61dca..9d0cb22eb5f475 100644
--- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll
@@ -24,8 +24,7 @@ define <vscale x 2 x i1> @reverse_nxv2i1(<vscale x 2 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0
; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9
-; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
-; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v10, 0
; RV32-BITS-UNKNOWN-NEXT: ret
;
; RV32-BITS-256-LABEL: reverse_nxv2i1:
@@ -39,8 +38,7 @@ define <vscale x 2 x i1> @reverse_nxv2i1(<vscale x 2 x i1> %a) {
; RV32-BITS-256-NEXT: vid.v v9
; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0
; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9
-; RV32-BITS-256-NEXT: vand.vi v8, v10, 1
-; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-256-NEXT: vmsne.vi v0, v10, 0
; RV32-BITS-256-NEXT: ret
;
; RV32-BITS-512-LABEL: reverse_nxv2i1:
@@ -54,8 +52,7 @@ define <vscale x 2 x i1> @reverse_nxv2i1(<vscale x 2 x i1> %a) {
; RV32-BITS-512-NEXT: vid.v v9
; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0
; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9
-; RV32-BITS-512-NEXT: vand.vi v8, v10, 1
-; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-512-NEXT: vmsne.vi v0, v10, 0
; RV32-BITS-512-NEXT: ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv2i1:
@@ -71,8 +68,7 @@ define <vscale x 2 x i1> @reverse_nxv2i1(<vscale x 2 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0
; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9
-; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
-; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v10, 0
; RV64-BITS-UNKNOWN-NEXT: ret
;
; RV64-BITS-256-LABEL: reverse_nxv2i1:
@@ -86,8 +82,7 @@ define <vscale x 2 x i1> @reverse_nxv2i1(<vscale x 2 x i1> %a) {
; RV64-BITS-256-NEXT: vid.v v9
; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0
; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9
-; RV64-BITS-256-NEXT: vand.vi v8, v10, 1
-; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-256-NEXT: vmsne.vi v0, v10, 0
; RV64-BITS-256-NEXT: ret
;
; RV64-BITS-512-LABEL: reverse_nxv2i1:
@@ -101,8 +96,7 @@ define <vscale x 2 x i1> @reverse_nxv2i1(<vscale x 2 x i1> %a) {
; RV64-BITS-512-NEXT: vid.v v9
; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0
; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9
-; RV64-BITS-512-NEXT: vand.vi v8, v10, 1
-; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-512-NEXT: vmsne.vi v0, v10, 0
; RV64-BITS-512-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.vector.reverse.nxv2i1(<vscale x 2 x i1> %a)
ret <vscale x 2 x i1> %res
@@ -122,8 +116,7 @@ define <vscale x 4 x i1> @reverse_nxv4i1(<vscale x 4 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0
; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9
-; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
-; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v10, 0
; RV32-BITS-UNKNOWN-NEXT: ret
;
; RV32-BITS-256-LABEL: reverse_nxv4i1:
@@ -137,8 +130,7 @@ define <vscale x 4 x i1> @reverse_nxv4i1(<vscale x 4 x i1> %a) {
; RV32-BITS-256-NEXT: vid.v v9
; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0
; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9
-; RV32-BITS-256-NEXT: vand.vi v8, v10, 1
-; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-256-NEXT: vmsne.vi v0, v10, 0
; RV32-BITS-256-NEXT: ret
;
; RV32-BITS-512-LABEL: reverse_nxv4i1:
@@ -152,8 +144,7 @@ define <vscale x 4 x i1> @reverse_nxv4i1(<vscale x 4 x i1> %a) {
; RV32-BITS-512-NEXT: vid.v v9
; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0
; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9
-; RV32-BITS-512-NEXT: vand.vi v8, v10, 1
-; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-512-NEXT: vmsne.vi v0, v10, 0
; RV32-BITS-512-NEXT: ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv4i1:
@@ -169,8 +160,7 @@ define <vscale x 4 x i1> @reverse_nxv4i1(<vscale x 4 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v9, v9, a0
; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v9
-; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v10, 1
-; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v10, 0
; RV64-BITS-UNKNOWN-NEXT: ret
;
; RV64-BITS-256-LABEL: reverse_nxv4i1:
@@ -184,8 +174,7 @@ define <vscale x 4 x i1> @reverse_nxv4i1(<vscale x 4 x i1> %a) {
; RV64-BITS-256-NEXT: vid.v v9
; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0
; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9
-; RV64-BITS-256-NEXT: vand.vi v8, v10, 1
-; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-256-NEXT: vmsne.vi v0, v10, 0
; RV64-BITS-256-NEXT: ret
;
; RV64-BITS-512-LABEL: reverse_nxv4i1:
@@ -199,8 +188,7 @@ define <vscale x 4 x i1> @reverse_nxv4i1(<vscale x 4 x i1> %a) {
; RV64-BITS-512-NEXT: vid.v v9
; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0
; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9
-; RV64-BITS-512-NEXT: vand.vi v8, v10, 1
-; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-512-NEXT: vmsne.vi v0, v10, 0
; RV64-BITS-512-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
ret <vscale x 4 x i1> %res
@@ -219,8 +207,7 @@ define <vscale x 8 x i1> @reverse_nxv8i1(<vscale x 8 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0
; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
-; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1
-; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v9, 0
; RV32-BITS-UNKNOWN-NEXT: ret
;
; RV32-BITS-256-LABEL: reverse_nxv8i1:
@@ -233,8 +220,7 @@ define <vscale x 8 x i1> @reverse_nxv8i1(<vscale x 8 x i1> %a) {
; RV32-BITS-256-NEXT: vid.v v9
; RV32-BITS-256-NEXT: vrsub.vx v9, v9, a0
; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v9
-; RV32-BITS-256-NEXT: vand.vi v8, v10, 1
-; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-256-NEXT: vmsne.vi v0, v10, 0
; RV32-BITS-256-NEXT: ret
;
; RV32-BITS-512-LABEL: reverse_nxv8i1:
@@ -247,8 +233,7 @@ define <vscale x 8 x i1> @reverse_nxv8i1(<vscale x 8 x i1> %a) {
; RV32-BITS-512-NEXT: vid.v v9
; RV32-BITS-512-NEXT: vrsub.vx v9, v9, a0
; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v9
-; RV32-BITS-512-NEXT: vand.vi v8, v10, 1
-; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-512-NEXT: vmsne.vi v0, v10, 0
; RV32-BITS-512-NEXT: ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i1:
@@ -263,8 +248,7 @@ define <vscale x 8 x i1> @reverse_nxv8i1(<vscale x 8 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0
; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10
-; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v9, 1
-; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v9, 0
; RV64-BITS-UNKNOWN-NEXT: ret
;
; RV64-BITS-256-LABEL: reverse_nxv8i1:
@@ -277,8 +261,7 @@ define <vscale x 8 x i1> @reverse_nxv8i1(<vscale x 8 x i1> %a) {
; RV64-BITS-256-NEXT: vid.v v9
; RV64-BITS-256-NEXT: vrsub.vx v9, v9, a0
; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v9
-; RV64-BITS-256-NEXT: vand.vi v8, v10, 1
-; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-256-NEXT: vmsne.vi v0, v10, 0
; RV64-BITS-256-NEXT: ret
;
; RV64-BITS-512-LABEL: reverse_nxv8i1:
@@ -291,8 +274,7 @@ define <vscale x 8 x i1> @reverse_nxv8i1(<vscale x 8 x i1> %a) {
; RV64-BITS-512-NEXT: vid.v v9
; RV64-BITS-512-NEXT: vrsub.vx v9, v9, a0
; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v9
-; RV64-BITS-512-NEXT: vand.vi v8, v10, 1
-; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-512-NEXT: vmsne.vi v0, v10, 0
; RV64-BITS-512-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.vector.reverse.nxv8i1(<vscale x 8 x i1> %a)
ret <vscale x 8 x i1> %res
@@ -313,8 +295,7 @@ define <vscale x 16 x i1> @reverse_nxv16i1(<vscale x 16 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v8
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v8
; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v12, 1
-; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0
; RV32-BITS-UNKNOWN-NEXT: ret
;
; RV32-BITS-256-LABEL: reverse_nxv16i1:
@@ -331,8 +312,7 @@ define <vscale x 16 x i1> @reverse_nxv16i1(<vscale x 16 x i1> %a) {
; RV32-BITS-256-NEXT: vrgather.vv v13, v10, v8
; RV32-BITS-256-NEXT: vrgather.vv v12, v11, v8
; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV32-BITS-256-NEXT: vand.vi v8, v12, 1
-; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-256-NEXT: vmsne.vi v0, v12, 0
; RV32-BITS-256-NEXT: ret
;
; RV32-BITS-512-LABEL: reverse_nxv16i1:
@@ -349,8 +329,7 @@ define <vscale x 16 x i1> @reverse_nxv16i1(<vscale x 16 x i1> %a) {
; RV32-BITS-512-NEXT: vrgather.vv v13, v10, v8
; RV32-BITS-512-NEXT: vrgather.vv v12, v11, v8
; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV32-BITS-512-NEXT: vand.vi v8, v12, 1
-; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV32-BITS-512-NEXT: vmsne.vi v0, v12, 0
; RV32-BITS-512-NEXT: ret
;
; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i1:
@@ -367,8 +346,7 @@ define <vscale x 16 x i1> @reverse_nxv16i1(<vscale x 16 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v13, v10, v8
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v11, v8
; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v12, 1
-; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v12, 0
; RV64-BITS-UNKNOWN-NEXT: ret
;
; RV64-BITS-256-LABEL: reverse_nxv16i1:
@@ -385,8 +363,7 @@ define <vscale x 16 x i1> @reverse_nxv16i1(<vscale x 16 x i1> %a) {
; RV64-BITS-256-NEXT: vrgather.vv v13, v10, v8
; RV64-BITS-256-NEXT: vrgather.vv v12, v11, v8
; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV64-BITS-256-NEXT: vand.vi v8, v12, 1
-; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-256-NEXT: vmsne.vi v0, v12, 0
; RV64-BITS-256-NEXT: ret
;
; RV64-BITS-512-LABEL: reverse_nxv16i1:
@@ -403,8 +380,7 @@ define <vscale x 16 x i1> @reverse_nxv16i1(<vscale x 16 x i1> %a) {
; RV64-BITS-512-NEXT: vrgather.vv v13, v10, v8
; RV64-BITS-512-NEXT: vrgather.vv v12, v11, v8
; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV64-BITS-512-NEXT: vand.vi v8, v12, 1
-; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
+; RV64-BITS-512-NEXT: vmsne.vi v0, v12, 0
; RV64-BITS-512-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.vector.reverse.nxv16i1(<vscale x 16 x i1> %a)
ret <vscale x 16 x i1> %res
@@ -427,7 +403,6 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v18, v12
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v19, v12
; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
; RV32-BITS-UNKNOWN-NEXT: ret
;
@@ -447,7 +422,6 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-256-NEXT: vrgather.vv v9, v18, v12
; RV32-BITS-256-NEXT: vrgather.vv v8, v19, v12
; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
; RV32-BITS-256-NEXT: ret
;
@@ -467,7 +441,6 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV32-BITS-512-NEXT: vrgather.vv v9, v18, v12
; RV32-BITS-512-NEXT: vrgather.vv v8, v19, v12
; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
; RV32-BITS-512-NEXT: ret
;
@@ -487,7 +460,6 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v18, v12
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v19, v12
; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
; RV64-BITS-UNKNOWN-NEXT: ret
;
@@ -507,7 +479,6 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV64-BITS-256-NEXT: vrgather.vv v9, v18, v12
; RV64-BITS-256-NEXT: vrgather.vv v8, v19, v12
; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
; RV64-BITS-256-NEXT: ret
;
@@ -527,7 +498,6 @@ define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) {
; RV64-BITS-512-NEXT: vrgather.vv v9, v18, v12
; RV64-BITS-512-NEXT: vrgather.vv v8, v19, v12
; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
; RV64-BITS-512-NEXT: ret
%res = call <vscale x 32 x i1> @llvm.vector.reverse.nxv32i1(<vscale x 32 x i1> %a)
@@ -555,7 +525,6 @@ define <vscale x 64 x i1> @reverse_nxv64i1(<vscale x 64 x i1> %a) {
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v30, v16
; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v31, v16
; RV32-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
; RV32-BITS-UNKNOWN-NEXT: ret
;
@@ -579,7 +548,6 @@ define <vscale x 64 x i1> @reverse_nxv64i1(<vscale x 64 x i1> %a) {
; RV32-BITS-256-NEXT: vrgather.vv v9, v22, v24
; RV32-BITS-256-NEXT: vrgather.vv v8, v23, v24
; RV32-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
; RV32-BITS-256-NEXT: ret
;
@@ -603,7 +571,6 @@ define <vscale x 64 x i1> @reverse_nxv64i1(<vscale x 64 x i1> %a) {
; RV32-BITS-512-NEXT: vrgather.vv v9, v22, v24
; RV32-BITS-512-NEXT: vrgather.vv v8, v23, v24
; RV32-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
; RV32-BITS-512-NEXT: ret
;
@@ -627,7 +594,6 @@ define <vscale x 64 x i1> @reverse_nxv64i1(<vscale x 64 x i1> %a) {
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v30, v16
; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v8, v31, v16
; RV64-BITS-UNKNOWN-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
; RV64-BITS-UNKNOWN-NEXT: ret
;
@@ -651,7 +617,6 @@ define <vscale x 64 x i1> @reverse_nxv64i1(<vscale x 64 x i1> %a) {
; RV64-BITS-256-NEXT: vrgather.vv v9, v22, v24
; RV64-BITS-256-NEXT: vrgather.vv v8, v23, v24
; RV64-BITS-256-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
; RV64-BITS-256-NEXT: ret
;
@@ -675,7 +640,6 @@ define <vscale x 64 x i1> @reverse_nxv64i1(<vscale x 64 x i1> %a) {
; RV64-BITS-512-NEXT: vrgather.vv v9, v22, v24
; RV64-BITS-512-NEXT: vrgather.vv v8, v23, v24
; RV64-BITS-512-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
; RV64-BITS-512-NEXT: ret
%res = call <vscale x 64 x i1> @llvm.vector.reverse.nxv64i1(<vscale x 64 x i1> %a)
More information about the llvm-commits
mailing list