[llvm] [RISCV] Use ISD::XOR instead of RISCVISD::VMXOR_VL in lowerVectorMaskVecReduction of scalable ISD::VECREDUCE_AND (PR #121812)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 6 10:33:06 PST 2025
https://github.com/topperc created https://github.com/llvm/llvm-project/pull/121812
This allows combining the XOR with earlier ISD::ANDs inserted by type legalization.
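For background, lowerVectorMaskVecReduction lowers an all-of mask reduction as "vcpop(~x) == 0": the reduction is true exactly when no lane of the inverted mask is set. For mask types wider than a single mask register, such as <vscale x 128 x i1>, type legalization splits the operand and merges the halves with generic ISD::AND nodes. Emitting the final invert as a generic ISD::XOR lets it combine with that trailing AND, so the vmand.mm/vmnot.m pair in the tests below collapses into a single vmnand.mm. A minimal sketch of the affected pattern (the function name is illustrative; the in-tree test is vreduce_and_nxv128i1 in vreductions-mask.ll):

  declare i1 @llvm.vector.reduce.and.nxv128i1(<vscale x 128 x i1>)

  define zeroext i1 @reduce_and_all(<vscale x 128 x i1> %v) {
    ; <vscale x 128 x i1> is wider than one mask register, so type
    ; legalization splits %v and ANDs the halves before the final
    ; invert-and-vcpop sequence produced by this lowering.
    %r = call i1 @llvm.vector.reduce.and.nxv128i1(<vscale x 128 x i1> %v)
    ret i1 %r
  }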
From 71ec8df85b345bce1835f0757d88ef62dc393924 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 6 Jan 2025 10:26:36 -0800
Subject: [PATCH] [RISCV] Use ISD::XOR instead of RISCVISD::VMXOR_VL in
lowerVectorMaskVecReduction of scalable ISD::VECREDUCE_AND
This allows combining the XOR with earlier ISD::ANDs inserted by
type legalization.
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 5 ++-
.../CodeGen/RISCV/rvv/vreductions-mask.ll | 36 +++++++------------
2 files changed, 16 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7efe3732d8be13..0d443cf7ec5c83 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -10154,7 +10154,10 @@ SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
case ISD::VP_REDUCE_AND: {
// vcpop ~x == 0
SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
- Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
+ if (IsVP || VecVT.isFixedLengthVector())
+ Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
+ else
+ Vec = DAG.getNode(ISD::XOR, DL, ContainerVT, Vec, TrueMask);
Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
CC = ISD::SETEQ;
break;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
index d99fd036b4fc92..ce9d6c5ab91a8a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll
@@ -785,8 +785,7 @@ define zeroext i1 @vreduce_and_nxv128i1(<vscale x 128 x i1> %v) {
; CHECK-LABEL: vreduce_and_nxv128i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmand.mm v8, v0, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -814,8 +813,7 @@ define zeroext i1 @vreduce_smax_nxv128i1(<vscale x 128 x i1> %v) {
; CHECK-LABEL: vreduce_smax_nxv128i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmand.mm v8, v0, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -829,8 +827,7 @@ define zeroext i1 @vreduce_umin_nxv128i1(<vscale x 128 x i1> %v) {
; CHECK-LABEL: vreduce_umin_nxv128i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmand.mm v8, v0, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v0, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -892,8 +889,7 @@ define zeroext i1 @vreduce_and_nxv256i1(<vscale x 256 x i1> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vmand.mm v8, v8, v10
; CHECK-NEXT: vmand.mm v9, v0, v9
-; CHECK-NEXT: vmand.mm v8, v9, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v9, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -925,8 +921,7 @@ define zeroext i1 @vreduce_smax_nxv256i1(<vscale x 256 x i1> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vmand.mm v8, v8, v10
; CHECK-NEXT: vmand.mm v9, v0, v9
-; CHECK-NEXT: vmand.mm v8, v9, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v9, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -942,8 +937,7 @@ define zeroext i1 @vreduce_umin_nxv256i1(<vscale x 256 x i1> %v) {
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vmand.mm v8, v8, v10
; CHECK-NEXT: vmand.mm v9, v0, v9
-; CHECK-NEXT: vmand.mm v8, v9, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v9, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -1019,8 +1013,7 @@ define zeroext i1 @vreduce_and_nxv512i1(<vscale x 512 x i1> %v) {
; CHECK-NEXT: vmand.mm v11, v0, v11
; CHECK-NEXT: vmand.mm v8, v8, v10
; CHECK-NEXT: vmand.mm v9, v11, v9
-; CHECK-NEXT: vmand.mm v8, v9, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v9, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -1060,8 +1053,7 @@ define zeroext i1 @vreduce_smax_nxv512i1(<vscale x 512 x i1> %v) {
; CHECK-NEXT: vmand.mm v11, v0, v11
; CHECK-NEXT: vmand.mm v8, v8, v10
; CHECK-NEXT: vmand.mm v9, v11, v9
-; CHECK-NEXT: vmand.mm v8, v9, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v9, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -1081,8 +1073,7 @@ define zeroext i1 @vreduce_umin_nxv512i1(<vscale x 512 x i1> %v) {
; CHECK-NEXT: vmand.mm v11, v0, v11
; CHECK-NEXT: vmand.mm v8, v8, v10
; CHECK-NEXT: vmand.mm v9, v11, v9
-; CHECK-NEXT: vmand.mm v8, v9, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v9, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -1186,8 +1177,7 @@ define zeroext i1 @vreduce_and_nxv1024i1(<vscale x 1024 x i1> %v) {
; CHECK-NEXT: vmand.mm v11, v15, v11
; CHECK-NEXT: vmand.mm v8, v8, v10
; CHECK-NEXT: vmand.mm v9, v11, v9
-; CHECK-NEXT: vmand.mm v8, v9, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v9, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -1243,8 +1233,7 @@ define zeroext i1 @vreduce_smax_nxv1024i1(<vscale x 1024 x i1> %v) {
; CHECK-NEXT: vmand.mm v11, v15, v11
; CHECK-NEXT: vmand.mm v8, v8, v10
; CHECK-NEXT: vmand.mm v9, v11, v9
-; CHECK-NEXT: vmand.mm v8, v9, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v9, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
@@ -1272,8 +1261,7 @@ define zeroext i1 @vreduce_umin_nxv1024i1(<vscale x 1024 x i1> %v) {
; CHECK-NEXT: vmand.mm v11, v15, v11
; CHECK-NEXT: vmand.mm v8, v8, v10
; CHECK-NEXT: vmand.mm v9, v11, v9
-; CHECK-NEXT: vmand.mm v8, v9, v8
-; CHECK-NEXT: vmnot.m v8, v8
+; CHECK-NEXT: vmnand.mm v8, v9, v8
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret