[llvm] r344554 - [AARCH64] Improve vector popcnt lowering with ADDLP

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 15 14:15:58 PDT 2018


Author: rksimon
Date: Mon Oct 15 14:15:58 2018
New Revision: 344554

URL: http://llvm.org/viewvc/llvm-project?rev=344554&view=rev
Log:
[AARCH64] Improve vector popcnt lowering with ADDLP

AARCH64 equivalent to D53257 - uses widening pairwise adds on vXi8 CTPOP to support i16/i32/i64 vectors.

This is a blocker for generic vector CTPOP expansion (PR32655) - this will remove the aarch64 diff from D53258.

Differential Revision: https://reviews.llvm.org/D53259

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/trunk/test/CodeGen/AArch64/arm64-vpopcnt.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=344554&r1=344553&r2=344554&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Mon Oct 15 14:15:58 2018
@@ -792,9 +792,9 @@ void AArch64TargetLowering::addTypeForNE
   for (MVT InnerVT : MVT::all_valuetypes())
     setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
 
-  // CNT supports only B element sizes.
+  // CNT supports only B element sizes, then use UADDLP to widen.
   if (VT != MVT::v8i8 && VT != MVT::v16i8)
-    setOperationAction(ISD::CTPOP, VT, Expand);
+    setOperationAction(ISD::CTPOP, VT, Custom);
 
   setOperationAction(ISD::UDIV, VT, Expand);
   setOperationAction(ISD::SDIV, VT, Expand);
@@ -4539,18 +4539,42 @@ SDValue AArch64TargetLowering::LowerCTPO
   SDLoc DL(Op);
   EVT VT = Op.getValueType();
 
-  if (VT == MVT::i32)
-    Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
-  Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
-
-  SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
-  SDValue UaddLV = DAG.getNode(
-      ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
-      DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
-
-  if (VT == MVT::i64)
-    UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
-  return UaddLV;
+  if (VT == MVT::i32 || VT == MVT::i64) {
+    if (VT == MVT::i32)
+      Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
+    Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);
+
+    SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
+    SDValue UaddLV = DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
+        DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);
+
+    if (VT == MVT::i64)
+      UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
+    return UaddLV;
+  }
+
+  assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
+          VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
+         "Unexpected type for custom ctpop lowering");
+
+  EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
+  Val = DAG.getBitcast(VT8Bit, Val);
+  Val = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Val);
+
+  // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
+  unsigned EltSize = 8;
+  unsigned NumElts = VT.is64BitVector() ? 8 : 16;
+  while (EltSize != VT.getScalarSizeInBits()) {
+    EltSize *= 2;
+    NumElts /= 2;
+    MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
+    Val = DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, WidenVT,
+        DAG.getConstant(Intrinsic::aarch64_neon_uaddlp, DL, MVT::i32), Val);
+  }
+
+  return Val;
 }
 
 SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vpopcnt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vpopcnt.ll?rev=344554&r1=344553&r2=344554&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vpopcnt.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vpopcnt.ll Mon Oct 15 14:15:58 2018
@@ -17,30 +17,8 @@ declare <8 x i8> @llvm.ctpop.v8i8(<8 x i
 define <4 x i16> @ctpopv4i16(<4 x i16> %x) nounwind readnone {
 ; CHECK-LABEL: ctpopv4i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w8, v0.h[0]
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    cnt v1.8b, v1.8b
-; CHECK-NEXT:    uaddlv h1, v1.8b
-; CHECK-NEXT:    umov w8, v0.h[1]
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    cnt v2.8b, v2.8b
-; CHECK-NEXT:    uaddlv h2, v2.8b
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov v1.h[1], w8
-; CHECK-NEXT:    umov w8, v0.h[2]
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    cnt v2.8b, v2.8b
-; CHECK-NEXT:    uaddlv h2, v2.8b
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov v1.h[2], w8
-; CHECK-NEXT:    umov w8, v0.h[3]
-; CHECK-NEXT:    fmov d0, x8
 ; CHECK-NEXT:    cnt v0.8b, v0.8b
-; CHECK-NEXT:    uaddlv h0, v0.8b
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov v1.h[3], w8
-; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    uaddlp v0.4h, v0.8b
 ; CHECK-NEXT:    ret
   %cnt = tail call <4 x i16> @llvm.ctpop.v4i16(<4 x i16> %x)
   ret <4 x i16> %cnt
@@ -51,18 +29,9 @@ declare <4 x i16> @llvm.ctpop.v4i16(<4 x
 define <2 x i32> @ctpopv2i32(<2 x i32> %x) nounwind readnone {
 ; CHECK-LABEL: ctpopv2i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    mov w8, v0.s[1]
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    fmov d0, x0
 ; CHECK-NEXT:    cnt v0.8b, v0.8b
-; CHECK-NEXT:    uaddlv h0, v0.8b
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    cnt v1.8b, v1.8b
-; CHECK-NEXT:    uaddlv h1, v1.8b
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov v0.s[1], w8
-; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    uaddlp v0.4h, v0.8b
+; CHECK-NEXT:    uaddlp v0.2s, v0.4h
 ; CHECK-NEXT:    ret
   %cnt = tail call <2 x i32> @llvm.ctpop.v2i32(<2 x i32> %x)
   ret <2 x i32> %cnt
@@ -70,6 +39,20 @@ define <2 x i32> @ctpopv2i32(<2 x i32> %
 
 declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>) nounwind readnone
 
+define <1 x i64> @ctpopv1i64(<1 x i64> %x) nounwind readnone {
+; CHECK-LABEL: ctpopv1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cnt v0.8b, v0.8b
+; CHECK-NEXT:    uaddlp v0.4h, v0.8b
+; CHECK-NEXT:    uaddlp v0.2s, v0.4h
+; CHECK-NEXT:    uaddlp v0.1d, v0.2s
+; CHECK-NEXT:    ret
+  %cnt = tail call <1 x i64> @llvm.ctpop.v1i64(<1 x i64> %x)
+  ret <1 x i64> %cnt
+}
+
+declare <1 x i64> @llvm.ctpop.v1i64(<1 x i64>) nounwind readnone
+
 define <16 x i8> @ctpopv16i8(<16 x i8> %x) nounwind readnone {
 ; CHECK-LABEL: ctpopv16i8:
 ; CHECK:       // %bb.0:
@@ -84,53 +67,8 @@ declare <16 x i8> @llvm.ctpop.v16i8(<16
 define <8 x i16> @ctpopv8i16(<8 x i16> %x) nounwind readnone {
 ; CHECK-LABEL: ctpopv8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    umov w8, v0.h[1]
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    cnt v1.8b, v1.8b
-; CHECK-NEXT:    uaddlv h1, v1.8b
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    umov w9, v0.h[0]
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    cnt v1.8b, v1.8b
-; CHECK-NEXT:    uaddlv h1, v1.8b
-; CHECK-NEXT:    mov v1.h[1], w8
-; CHECK-NEXT:    umov w8, v0.h[2]
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    cnt v2.8b, v2.8b
-; CHECK-NEXT:    uaddlv h2, v2.8b
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov v1.h[2], w8
-; CHECK-NEXT:    umov w8, v0.h[3]
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    cnt v2.8b, v2.8b
-; CHECK-NEXT:    uaddlv h2, v2.8b
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov v1.h[3], w8
-; CHECK-NEXT:    umov w8, v0.h[4]
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    cnt v2.8b, v2.8b
-; CHECK-NEXT:    uaddlv h2, v2.8b
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov v1.h[4], w8
-; CHECK-NEXT:    umov w8, v0.h[5]
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    cnt v2.8b, v2.8b
-; CHECK-NEXT:    uaddlv h2, v2.8b
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov v1.h[5], w8
-; CHECK-NEXT:    umov w8, v0.h[6]
-; CHECK-NEXT:    fmov d2, x8
-; CHECK-NEXT:    cnt v2.8b, v2.8b
-; CHECK-NEXT:    uaddlv h2, v2.8b
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov v1.h[6], w8
-; CHECK-NEXT:    umov w8, v0.h[7]
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    cnt v0.8b, v0.8b
-; CHECK-NEXT:    uaddlv h0, v0.8b
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov v1.h[7], w8
-; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    uaddlp v0.8h, v0.16b
 ; CHECK-NEXT:    ret
   %cnt = tail call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %x)
   ret <8 x i16> %cnt
@@ -141,28 +79,9 @@ declare <8 x i16> @llvm.ctpop.v8i16(<8 x
 define <4 x i32> @ctpopv4i32(<4 x i32> %x) nounwind readnone {
 ; CHECK-LABEL: ctpopv4i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, v0.s[1]
-; CHECK-NEXT:    mov w9, v0.s[2]
-; CHECK-NEXT:    mov w10, v0.s[3]
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    fmov d0, x0
-; CHECK-NEXT:    cnt v0.8b, v0.8b
-; CHECK-NEXT:    uaddlv h0, v0.8b
-; CHECK-NEXT:    fmov d1, x8
-; CHECK-NEXT:    cnt v1.8b, v1.8b
-; CHECK-NEXT:    uaddlv h1, v1.8b
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov v0.s[1], w8
-; CHECK-NEXT:    fmov d1, x9
-; CHECK-NEXT:    cnt v1.8b, v1.8b
-; CHECK-NEXT:    uaddlv h1, v1.8b
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov v0.s[2], w8
-; CHECK-NEXT:    fmov d1, x10
-; CHECK-NEXT:    cnt v1.8b, v1.8b
-; CHECK-NEXT:    uaddlv h1, v1.8b
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov v0.s[3], w8
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    uaddlp v0.8h, v0.16b
+; CHECK-NEXT:    uaddlp v0.4s, v0.8h
 ; CHECK-NEXT:    ret
   %cnt = tail call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %x)
   ret <4 x i32> %cnt
@@ -173,17 +92,10 @@ declare <4 x i32> @llvm.ctpop.v4i32(<4 x
 define <2 x i64> @ctpopv2i64(<2 x i64> %x) nounwind readnone {
 ; CHECK-LABEL: ctpopv2i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cnt v1.8b, v0.8b
-; CHECK-NEXT:    uaddlv h1, v1.8b
-; CHECK-NEXT:    fmov w0, s1
-; CHECK-NEXT:    fmov d1, x0
-; CHECK-NEXT:    mov x8, v0.d[1]
-; CHECK-NEXT:    fmov d0, x8
-; CHECK-NEXT:    cnt v0.8b, v0.8b
-; CHECK-NEXT:    uaddlv h0, v0.8b
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    mov v1.d[1], x8
-; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    cnt v0.16b, v0.16b
+; CHECK-NEXT:    uaddlp v0.8h, v0.16b
+; CHECK-NEXT:    uaddlp v0.4s, v0.8h
+; CHECK-NEXT:    uaddlp v0.2d, v0.4s
 ; CHECK-NEXT:    ret
   %cnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
   ret <2 x i64> %cnt




More information about the llvm-commits mailing list