[llvm] ee8aa35 - [AArch64] Use ADDV for boolean xor reductions.

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Mon May 16 14:36:32 PDT 2022


Author: Paul Walker
Date: 2022-05-16T22:34:12+01:00
New Revision: ee8aa351e43f27e1dd4240560a3e054ca045186f

URL: https://github.com/llvm/llvm-project/commit/ee8aa351e43f27e1dd4240560a3e054ca045186f
DIFF: https://github.com/llvm/llvm-project/commit/ee8aa351e43f27e1dd4240560a3e054ca045186f.diff

LOG: [AArch64] Use ADDV for boolean xor reductions.

NEON does not have native support for xor reductions. However, when
reducing predicate vectors the operation is synonymous with an add
reduction that is supported.

Differential Revision: https://reviews.llvm.org/D125605

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/reduce-xor.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 95d7269384050..de60ee4181f4b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -14540,24 +14540,34 @@ static SDValue performANDCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {
   SelectionDAG &DAG = DCI.DAG;
   SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
   EVT VT = N->getValueType(0);
 
   if (SDValue R = performANDORCSELCombine(N, DAG))
     return R;
 
-  if (!VT.isVector() || !DAG.getTargetLoweringInfo().isTypeLegal(VT))
+  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
     return SDValue();
 
+  // Although NEON has no EORV instruction, when only the least significant bit
+  // is required the operation is synonymous with ADDV.
+  if (LHS.getOpcode() == ISD::VECREDUCE_XOR && isOneConstant(RHS) &&
+      LHS.getOperand(0).getValueType().isFixedLengthVector() &&
+      LHS.hasOneUse()) {
+    SDLoc DL(N);
+    SDValue ADDV = DAG.getNode(ISD::VECREDUCE_ADD, DL, VT, LHS.getOperand(0));
+    return DAG.getNode(ISD::AND, DL, VT, ADDV, RHS);
+  }
+
   if (VT.isScalableVector())
     return performSVEAndCombine(N, DCI);
 
   // The combining code below works only for NEON vectors. In particular, it
   // does not work for SVE when dealing with vectors wider than 128 bits.
-  if (!(VT.is64BitVector() || VT.is128BitVector()))
+  if (!VT.is64BitVector() && !VT.is128BitVector())
     return SDValue();
 
-  BuildVectorSDNode *BVN =
-      dyn_cast<BuildVectorSDNode>(N->getOperand(1).getNode());
+  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
   if (!BVN)
     return SDValue();
 

diff --git a/llvm/test/CodeGen/AArch64/reduce-xor.ll b/llvm/test/CodeGen/AArch64/reduce-xor.ll
index 5b07b58152541..8c3db972734d7 100644
--- a/llvm/test/CodeGen/AArch64/reduce-xor.ll
+++ b/llvm/test/CodeGen/AArch64/reduce-xor.ll
@@ -19,10 +19,8 @@ define i1 @test_redxor_v1i1(<1 x i1> %a) {
 define i1 @test_redxor_v2i1(<2 x i1> %a) {
 ; CHECK-LABEL: test_redxor_v2i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov w8, v0.s[1]
-; CHECK-NEXT:    fmov w9, s0
-; CHECK-NEXT:    eor w8, w9, w8
+; CHECK-NEXT:    addp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
 ;
@@ -42,14 +40,8 @@ define i1 @test_redxor_v2i1(<2 x i1> %a) {
 define i1 @test_redxor_v4i1(<4 x i1> %a) {
 ; CHECK-LABEL: test_redxor_v4i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.h[1]
-; CHECK-NEXT:    umov w9, v0.h[0]
-; CHECK-NEXT:    umov w10, v0.h[2]
-; CHECK-NEXT:    umov w11, v0.h[3]
-; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    eor w8, w8, w10
-; CHECK-NEXT:    eor w8, w8, w11
+; CHECK-NEXT:    addv h0, v0.4h
+; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
 ;
@@ -75,22 +67,8 @@ define i1 @test_redxor_v4i1(<4 x i1> %a) {
 define i1 @test_redxor_v8i1(<8 x i1> %a) {
 ; CHECK-LABEL: test_redxor_v8i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.b[1]
-; CHECK-NEXT:    umov w9, v0.b[0]
-; CHECK-NEXT:    umov w10, v0.b[2]
-; CHECK-NEXT:    umov w11, v0.b[3]
-; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    umov w13, v0.b[5]
-; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    umov w9, v0.b[6]
-; CHECK-NEXT:    eor w8, w8, w10
-; CHECK-NEXT:    umov w10, v0.b[7]
-; CHECK-NEXT:    eor w8, w8, w11
-; CHECK-NEXT:    eor w8, w8, w12
-; CHECK-NEXT:    eor w8, w8, w13
-; CHECK-NEXT:    eor w8, w8, w9
-; CHECK-NEXT:    eor w8, w8, w10
+; CHECK-NEXT:    addv b0, v0.8b
+; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
 ;
@@ -128,23 +106,8 @@ define i1 @test_redxor_v8i1(<8 x i1> %a) {
 define i1 @test_redxor_v16i1(<16 x i1> %a) {
 ; CHECK-LABEL: test_redxor_v16i1:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
-; CHECK-NEXT:    umov w8, v0.b[1]
-; CHECK-NEXT:    umov w9, v0.b[0]
-; CHECK-NEXT:    umov w10, v0.b[2]
-; CHECK-NEXT:    umov w11, v0.b[3]
-; CHECK-NEXT:    umov w12, v0.b[4]
-; CHECK-NEXT:    eor w8, w9, w8
-; CHECK-NEXT:    umov w9, v0.b[5]
-; CHECK-NEXT:    eor w8, w8, w10
-; CHECK-NEXT:    umov w10, v0.b[6]
-; CHECK-NEXT:    eor w8, w8, w11
-; CHECK-NEXT:    umov w11, v0.b[7]
-; CHECK-NEXT:    eor w8, w8, w12
-; CHECK-NEXT:    eor w8, w8, w9
-; CHECK-NEXT:    eor w8, w8, w10
-; CHECK-NEXT:    eor w8, w8, w11
+; CHECK-NEXT:    addv b0, v0.16b
+; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    and w0, w8, #0x1
 ; CHECK-NEXT:    ret
 ;


        


More information about the llvm-commits mailing list