[llvm] fb0d636 - [RISCV][SelectionDAG] Support VP_REDUCE_ADD mask operation.
Lian Wang via llvm-commits
llvm-commits at lists.llvm.org
Thu May 5 18:58:17 PDT 2022
Author: Lian Wang
Date: 2022-05-06T01:49:21Z
New Revision: fb0d636f285bdef9360747343f37ab857825eb1b
URL: https://github.com/llvm/llvm-project/commit/fb0d636f285bdef9360747343f37ab857825eb1b
DIFF: https://github.com/llvm/llvm-project/commit/fb0d636f285bdef9360747343f37ab857825eb1b.diff
LOG: [RISCV][SelectionDAG] Support VP_REDUCE_ADD mask operation.
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D124986
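
Note (a minimal standalone sketch, not part of this patch): the rewrite is legal because i1 addition wraps modulo 2, so a masked add-reduction of i1 lanes is just the parity of the active bits, which is exactly the masked xor-reduction. The helper names below (vpReduceAddI1, vpReduceXorI1) are hypothetical reference models, not LLVM APIs.

#include <bitset>
#include <cassert>

// Hypothetical reference model of llvm.vp.reduce.add on i1 lanes.
static bool vpReduceAddI1(bool Start, const std::bitset<16> &V,
                          const std::bitset<16> &M, unsigned EVL) {
  unsigned Sum = Start;
  for (unsigned I = 0; I < EVL; ++I)
    if (M[I])
      Sum += V[I];
  return Sum & 1; // i1 arithmetic wraps modulo 2.
}

// Hypothetical reference model of llvm.vp.reduce.xor on i1 lanes.
static bool vpReduceXorI1(bool Start, const std::bitset<16> &V,
                          const std::bitset<16> &M, unsigned EVL) {
  bool Acc = Start;
  for (unsigned I = 0; I < EVL; ++I)
    if (M[I])
      Acc ^= V[I];
  return Acc;
}

int main() {
  const std::bitset<16> V(0xB1A5), M(0xF0F0);
  for (unsigned EVL = 0; EVL <= 16; ++EVL)
    for (bool Start : {false, true})
      assert(vpReduceAddI1(Start, V, M, EVL) == vpReduceXorI1(Start, V, M, EVL));
  return 0;
}

The two models agree for every EVL, mask, and start value, which is why getNode can simply retarget the opcode for i1 vectors.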
Added:
Modified:
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index cc8fc675e5257..fc3a19f4a9f59 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -8862,6 +8862,11 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
Opcode = ISD::VP_AND;
break;
+ case ISD::VP_REDUCE_ADD:
+ // If this is a VP_REDUCE_ADD mask operation, turn it into VP_REDUCE_XOR.
+ if (VT == MVT::i1)
+ Opcode = ISD::VP_REDUCE_XOR;
+ break;
}
// Memoize nodes.
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
index a744401dabd60..8b6004bc18bc6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -315,3 +315,88 @@ define signext i1 @vpreduce_xor_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m,
%r = call i1 @llvm.vp.reduce.xor.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
ret i1 %r
}
+
+declare i1 @llvm.vp.reduce.add.v1i1(i1, <1 x i1>, <1 x i1>, i32)
+
+define signext i1 @vpreduce_add_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_v1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.v2i1(i1, <2 x i1>, <2 x i1>, i32)
+
+define signext i1 @vpreduce_add_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_v2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.v4i1(i1, <4 x i1>, <4 x i1>, i32)
+
+define signext i1 @vpreduce_add_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_v4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.v8i1(i1, <8 x i1>, <8 x i1>, i32)
+
+define signext i1 @vpreduce_add_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_v8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.v16i1(i1, <16 x i1>, <16 x i1>, i32)
+
+define signext i1 @vpreduce_add_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_v16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+ ret i1 %r
+}
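
Note (a rough scalar model for illustration, an assumption rather than backend code): the sequence the CHECK lines verify computes the same parity. vcpop.m counts the active i1 lanes under the mask, the xor folds in the start value, andi keeps the parity bit, and neg sign-extends the i1 result for the signext return.

#include <bit>
#include <cstdint>

static int64_t modelVpReduceAddI1(int64_t StartSext, uint64_t V, uint64_t M,
                                  unsigned EVL) {
  uint64_t EvlMask = EVL >= 64 ? ~uint64_t{0} : (uint64_t{1} << EVL) - 1;
  uint64_t Pop = std::popcount(V & M & EvlMask);      // vcpop.m a1, v9, v0.t
  uint64_t Parity = (Pop ^ uint64_t(StartSext)) & 1;  // xor a0, a1, a0 ; andi a0, a0, 1
  return -int64_t(Parity);                            // neg a0, a0
}

The scalable-vector tests below check the same pattern; only the vsetvli LMUL changes with the element count.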
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
index 87c899450eed0..89c798f11c97b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -419,3 +419,122 @@ define signext i1 @vpreduce_or_nxv128i1(i1 signext %s, <vscale x 128 x i1> %v, <
%r = call i1 @llvm.vp.reduce.or.nxv128i1(i1 %s, <vscale x 128 x i1> %v, <vscale x 128 x i1> %m, i32 %evl)
ret i1 %r
}
+
+declare i1 @llvm.vp.reduce.add.nxv1i1(i1, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)
+
+define signext i1 @vpreduce_add_nxv1i1(i1 signext %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_nxv1i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.nxv1i1(i1 %s, <vscale x 1 x i1> %v, <vscale x 1 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.nxv2i1(i1, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
+
+define signext i1 @vpreduce_add_nxv2i1(i1 signext %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_nxv2i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.nxv2i1(i1 %s, <vscale x 2 x i1> %v, <vscale x 2 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.nxv4i1(i1, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
+
+define signext i1 @vpreduce_add_nxv4i1(i1 signext %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_nxv4i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.nxv4i1(i1 %s, <vscale x 4 x i1> %v, <vscale x 4 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.nxv8i1(i1, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
+
+define signext i1 @vpreduce_add_nxv8i1(i1 signext %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_nxv8i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.nxv8i1(i1 %s, <vscale x 8 x i1> %v, <vscale x 8 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.nxv16i1(i1, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
+
+define signext i1 @vpreduce_add_nxv16i1(i1 signext %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_nxv16i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.nxv16i1(i1 %s, <vscale x 16 x i1> %v, <vscale x 16 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.nxv32i1(i1, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
+
+define signext i1 @vpreduce_add_nxv32i1(i1 signext %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_nxv32i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.nxv32i1(i1 %s, <vscale x 32 x i1> %v, <vscale x 32 x i1> %m, i32 %evl)
+ ret i1 %r
+}
+
+declare i1 @llvm.vp.reduce.add.nxv64i1(i1, <vscale x 64 x i1>, <vscale x 64 x i1>, i32)
+
+define signext i1 @vpreduce_add_nxv64i1(i1 signext %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_add_nxv64i1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmv1r.v v9, v0
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
+; CHECK-NEXT: xor a0, a1, a0
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: neg a0, a0
+; CHECK-NEXT: ret
+ %r = call i1 @llvm.vp.reduce.add.nxv64i1(i1 %s, <vscale x 64 x i1> %v, <vscale x 64 x i1> %m, i32 %evl)
+ ret i1 %r
+}