[llvm] 6452549 - [DAGCombiner] Fold vecreduce_or/and if operand is insert_subvector.

Sander de Smalen via llvm-commits llvm-commits at lists.llvm.org
Sat Feb 5 06:36:21 PST 2022


Author: Sander de Smalen
Date: 2022-02-05T14:35:53Z
New Revision: 6452549f300cf1a8198708bb4d0ed3344ae05018

URL: https://github.com/llvm/llvm-project/commit/6452549f300cf1a8198708bb4d0ed3344ae05018
DIFF: https://github.com/llvm/llvm-project/commit/6452549f300cf1a8198708bb4d0ed3344ae05018.diff

LOG: [DAGCombiner] Fold vecreduce_or/and if operand is insert_subvector.

Fold:
  vecreduce_or(insert_subvec(zeroinitializer, vec))
  -> vecreduce_or(vec)

  vecreduce_and(insert_subvec(allones, vec))
  -> vecreduce_and(vec)

  vecreduce_and/or(insert_subvec(undef, vec))
  -> vecreduce_and/or(vec)

This is useful for SVE which uses insert/extract subvector
to convert fixed-width to/from scalable vectors.

Reviewed By: bsmith

Differential Revision: https://reviews.llvm.org/D118919

Added: 
    llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index add9a21fa7219..b69524a99f6c2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -22519,6 +22519,19 @@ SDValue DAGCombiner::visitVECREDUCE(SDNode *N) {
       return DAG.getNode(NewOpcode, SDLoc(N), N->getValueType(0), N0);
   }
 
+  // vecreduce_or(insert_subvector(zero or undef, val)) -> vecreduce_or(val)
+  // vecreduce_and(insert_subvector(ones or undef, val)) -> vecreduce_and(val)
+  if (N0.getOpcode() == ISD::INSERT_SUBVECTOR &&
+      TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
+    SDValue Vec = N0.getOperand(0);
+    SDValue Subvec = N0.getOperand(1);
+    if ((Opcode == ISD::VECREDUCE_OR &&
+         (N0.getOperand(0).isUndef() || isNullOrNullSplat(Vec))) ||
+        (Opcode == ISD::VECREDUCE_AND &&
+         (N0.getOperand(0).isUndef() || isAllOnesOrAllOnesSplat(Vec))))
+      return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), Subvec);
+  }
+
   return SDValue();
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll b/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll
new file mode 100644
index 0000000000000..8aba01bf1f56f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64 -mattr=+sve < %s | FileCheck %s
+
+;
+; OR reductions
+;
+
+define i1 @reduce_or_insert_subvec_into_zero(<vscale x 4 x i1> %in) {
+; CHECK-LABEL: reduce_or_insert_subvec_into_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptest p0, p0.b
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> zeroinitializer, <vscale x 4 x i1> %in, i64 0)
+  %res = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %t)
+  ret i1 %res
+}
+
+define i1 @reduce_or_insert_subvec_into_poison(<vscale x 4 x i1> %in) {
+; CHECK-LABEL: reduce_or_insert_subvec_into_poison:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptest p0, p0.b
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> poison, <vscale x 4 x i1> %in, i64 0)
+  %res = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %t)
+  ret i1 %res
+}
+
+define i1 @reduce_or_insert_subvec_into_nonzero(<vscale x 4 x i1> %in, <vscale x 16 x i1> %vec) {
+; CHECK-LABEL: reduce_or_insert_subvec_into_nonzero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    punpklo p2.h, p1.b
+; CHECK-NEXT:    punpkhi p1.h, p1.b
+; CHECK-NEXT:    punpkhi p2.h, p2.b
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p2.h
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
+; CHECK-NEXT:    ptest p0, p0.b
+; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    ret
+  %t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %in, i64 0)
+  %res = call i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1> %t)
+  ret i1 %res
+}
+
+;
+; AND reductions
+;
+
+define i1 @reduce_and_insert_subvec_into_ones(<vscale x 4 x i1> %in) {
+; CHECK-LABEL: reduce_and_insert_subvec_into_ones:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    not p0.b, p1/z, p0.b
+; CHECK-NEXT:    ptest p1, p0.b
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %allones.ins = insertelement <vscale x 16 x i1> poison, i1 1, i32 0
+  %allones = shufflevector <vscale x 16 x i1> %allones.ins,  <vscale x 16 x i1> poison,  <vscale x 16 x i32> zeroinitializer
+  %t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %allones, <vscale x 4 x i1> %in, i64 0)
+  %res = call i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1> %t)
+  ret i1 %res
+}
+
+define i1 @reduce_and_insert_subvec_into_poison(<vscale x 4 x i1> %in) {
+; CHECK-LABEL: reduce_and_insert_subvec_into_poison:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p1.s
+; CHECK-NEXT:    not p0.b, p1/z, p0.b
+; CHECK-NEXT:    ptest p1, p0.b
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> poison, <vscale x 4 x i1> %in, i64 0)
+  %res = call i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1> %t)
+  ret i1 %res
+}
+
+define i1 @reduce_and_insert_subvec_into_var(<vscale x 4 x i1> %in, <vscale x 16 x i1> %vec) {
+; CHECK-LABEL: reduce_and_insert_subvec_into_var:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    punpklo p3.h, p1.b
+; CHECK-NEXT:    punpkhi p1.h, p1.b
+; CHECK-NEXT:    punpkhi p3.h, p3.b
+; CHECK-NEXT:    ptrue p2.b
+; CHECK-NEXT:    uzp1 p0.h, p0.h, p3.h
+; CHECK-NEXT:    uzp1 p0.b, p0.b, p1.b
+; CHECK-NEXT:    not p0.b, p2/z, p0.b
+; CHECK-NEXT:    ptest p2, p0.b
+; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    ret
+  %t = call <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1> %vec, <vscale x 4 x i1> %in, i64 0)
+  %res = call i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1> %t)
+  ret i1 %res
+}
+
+declare i1 @llvm.vector.reduce.and.nxv16i1(<vscale x 16 x i1>)
+declare i1 @llvm.vector.reduce.or.nxv16i1(<vscale x 16 x i1>)
+declare <vscale x 16 x i1> @llvm.experimental.vector.insert.nxv16i1.nxv4i1(<vscale x 16 x i1>, <vscale x 4 x i1>, i64)


        


More information about the llvm-commits mailing list