[llvm] [RISCV][Isel] Remove redundant vmerge for the scalable vector vwadd(u).wv (PR #80079)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Jan 30 17:31:26 PST 2024
https://github.com/sun-jacobi updated https://github.com/llvm/llvm-project/pull/80079
From c3317fd88ba5d39b24c7aed0a8d7aa501c8563f7 Mon Sep 17 00:00:00 2001
From: sun-jacobi <sun1011jacobi at gmail.com>
Date: Wed, 31 Jan 2024 08:30:36 +0900
Subject: [PATCH 1/2] [RISCV][Isel] Remove redundant vmerge for the scalable
vector vwadd(u).wv.
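
For scalable vectors, the zeroing select that feeds a vwadd(u).wv reaches
combineVWADDWSelect as ISD::VSELECT rather than RISCVISD::VMERGE_VL (the
form the fixed-length path produces), so the combine previously gave up
and a redundant vmerge survived into the output. Accept both opcodes, and
for the VSELECT form check the all-zeros false operand with
ISD::isConstantSplatVectorAllZeros.

A minimal IR sketch of the shape that now folds into a single masked
vwadd.wv (distilled from the new test below; the function and value names
are illustrative):

  define <vscale x 8 x i64> @sketch(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y, <vscale x 8 x i1> %mask) {
    ; zero the inactive lanes of %x ...
    %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
    ; ... then widen and accumulate into %y
    %sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
    %ret = add <vscale x 8 x i64> %sa, %y
    ret <vscale x 8 x i64> %ret
  }

The select previously lowered to a vmerge.vvm followed by an unmasked
vwadd.wv; with this change it becomes one vwadd.wv with the mask applied
directly (v0.t).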
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 24 +++--
.../CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll | 90 +++++++++++++++++++
2 files changed, 108 insertions(+), 6 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 82836346d8832..f63532ea07fab 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13776,8 +13776,11 @@ static SDValue combineVWADDWSelect(SDNode *N, SelectionDAG &DAG) {
SDValue Y = N->getOperand(0);
SDValue MergeOp = N->getOperand(1);
- if (MergeOp.getOpcode() != RISCVISD::VMERGE_VL)
+ unsigned MergeOpc = MergeOp.getOpcode();
+
+ if (MergeOpc != RISCVISD::VMERGE_VL && MergeOpc != ISD::VSELECT)
return SDValue();
+
SDValue X = MergeOp->getOperand(1);
if (!MergeOp.hasOneUse())
@@ -13795,13 +13798,22 @@ static SDValue combineVWADDWSelect(SDNode *N, SelectionDAG &DAG) {
// False value of MergeOp should be all zeros
SDValue Z = MergeOp->getOperand(2);
- if (Z.getOpcode() != ISD::INSERT_SUBVECTOR)
- return SDValue();
- if (!ISD::isBuildVectorAllZeros(Z.getOperand(1).getNode()))
- return SDValue();
- if (!isNullOrNullSplat(Z.getOperand(0)) && !Z.getOperand(0).isUndef())
+
+ // Scalable vector
+ if (MergeOpc == ISD::VSELECT &&
+ !ISD::isConstantSplatVectorAllZeros(Z.getNode()))
return SDValue();
+ // Fixed-length vector
+ if (MergeOpc == RISCVISD::VMERGE_VL) {
+ if (Z.getOpcode() != ISD::INSERT_SUBVECTOR)
+ return SDValue();
+ if (!ISD::isBuildVectorAllZeros(Z.getOperand(1).getNode()))
+ return SDValue();
+ if (!isNullOrNullSplat(Z.getOperand(0)) && !Z.getOperand(0).isUndef())
+ return SDValue();
+ }
+
return DAG.getNode(Opc, SDLoc(N), N->getValueType(0),
{Y, X, Y, MergeOp->getOperand(0), N->getOperand(4)},
N->getFlags());
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll
new file mode 100644
index 0000000000000..ad7ad991e082c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-mask-sdnode.ll
@@ -0,0 +1,90 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+
+define <vscale x 8 x i64> @vwadd_wv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: vwadd_wv_mask_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 42
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu
+; CHECK-NEXT: vwadd.wv v16, v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %mask = icmp slt <vscale x 8 x i32> %x, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 42, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+ %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
+ %sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
+ %ret = add <vscale x 8 x i64> %sa, %y
+ ret <vscale x 8 x i64> %ret
+}
+
+define <vscale x 8 x i64> @vwaddu_wv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: vwaddu_wv_mask_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 42
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu
+; CHECK-NEXT: vwaddu.wv v16, v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %mask = icmp slt <vscale x 8 x i32> %x, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 42, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+ %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
+ %sa = zext <vscale x 8 x i32> %a to <vscale x 8 x i64>
+ %ret = add <vscale x 8 x i64> %sa, %y
+ ret <vscale x 8 x i64> %ret
+}
+
+define <vscale x 8 x i64> @vwaddu_vv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
+; CHECK-LABEL: vwaddu_vv_mask_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 42
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT: vwaddu.vv v16, v8, v12
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %mask = icmp slt <vscale x 8 x i32> %x, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 42, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+ %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
+ %sa = zext <vscale x 8 x i32> %a to <vscale x 8 x i64>
+ %sy = zext <vscale x 8 x i32> %y to <vscale x 8 x i64>
+ %ret = add <vscale x 8 x i64> %sa, %sy
+ ret <vscale x 8 x i64> %ret
+}
+
+define <vscale x 8 x i64> @vwadd_wv_mask_v8i32_commutative(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: vwadd_wv_mask_v8i32_commutative:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 42
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu
+; CHECK-NEXT: vwadd.wv v16, v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: ret
+ %mask = icmp slt <vscale x 8 x i32> %x, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 42, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+ %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
+ %sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
+ %ret = add <vscale x 8 x i64> %y, %sa
+ ret <vscale x 8 x i64> %ret
+}
+
+define <vscale x 8 x i64> @vwadd_wv_mask_v8i32_nonzero(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
+; CHECK-LABEL: vwadd_wv_mask_v8i32_nonzero:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 42
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vmv.v.i v12, 1
+; CHECK-NEXT: vmerge.vvm v24, v12, v8, v0
+; CHECK-NEXT: vwadd.wv v8, v16, v24
+; CHECK-NEXT: ret
+ %mask = icmp slt <vscale x 8 x i32> %x, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 42, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+ %a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> poison, i32 1, i64 0), <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer)
+ %sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
+ %ret = add <vscale x 8 x i64> %sa, %y
+ ret <vscale x 8 x i64> %ret
+}
From e034e74553d5e6aab7166be5785920fd5f768964 Mon Sep 17 00:00:00 2001
From: sun-jacobi <sun1011jacobi at gmail.com>
Date: Wed, 31 Jan 2024 10:31:10 +0900
Subject: [PATCH 2/2] Update vector-interleave.ll.
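
With the new combine, the interleave lowering's vmerge.vim of a splat of 1
against zeros followed by an unmasked vwaddu.wv folds into a single masked
vwaddu.wx with scalar operand 1, so regenerate the autogenerated ZVBB
checks.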
---
llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index 5cdbac5ac83d2..e84fd1b1a7036 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -30,15 +30,17 @@ define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1>
;
; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZVBB: # %bb.0:
-; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; ZVBB-NEXT: vmv1r.v v9, v0
+; ZVBB-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; ZVBB-NEXT: vmv.v.i v10, 0
-; ZVBB-NEXT: vmerge.vim v12, v10, 1, v0
; ZVBB-NEXT: vmv1r.v v0, v8
-; ZVBB-NEXT: vmerge.vim v8, v10, 1, v0
-; ZVBB-NEXT: vwsll.vi v16, v8, 8
-; ZVBB-NEXT: vwaddu.wv v16, v16, v12
-; ZVBB-NEXT: vmsne.vi v8, v18, 0
-; ZVBB-NEXT: vmsne.vi v0, v16, 0
+; ZVBB-NEXT: vmerge.vim v10, v10, 1, v0
+; ZVBB-NEXT: vwsll.vi v12, v10, 8
+; ZVBB-NEXT: li a0, 1
+; ZVBB-NEXT: vmv1r.v v0, v9
+; ZVBB-NEXT: vwaddu.wx v12, v12, a0, v0.t
+; ZVBB-NEXT: vmsne.vi v8, v14, 0
+; ZVBB-NEXT: vmsne.vi v0, v12, 0
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: srli a0, a0, 2
; ZVBB-NEXT: add a1, a0, a0