[llvm] r323016 - [X86] Teach X86 codegen to use vector width preference to avoid promoting to 512-bit types when VLX is enabled and the preference is for a smaller size.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 19 16:26:12 PST 2018


Author: ctopper
Date: Fri Jan 19 16:26:12 2018
New Revision: 323016

URL: http://llvm.org/viewvc/llvm-project?rev=323016&view=rev
Log:
[X86] Teach X86 codegen to use vector width preference to avoid promoting to 512-bit types when VLX is enabled and the preference is for a smaller size.

This change applies to places where we would widen 128/256-bit operations to 512 bits in order to get a wider element type through sext/zext. Any 512-bit types that already existed in the IR/DAG will be left that way.

The width preference has no effect on codegen behavior when the target does not have AVX512 enabled, so AVX/AVX2 codegen cannot be limited via this mechanism yet.

If the preference is lower than 256, we may still use a 256-bit type to perform the operation. Constraining to 128 bits would make it much more difficult to support some operations; many of these cases need to change the element width while keeping the element count constant, which is most easily done by switching between 256-bit and 128-bit types.

The preference is only obeyed when AVX512 and VLX are available. This means the preference is not obeyed for KNL, but is obeyed for SKX, Cannonlake, and Icelake. For KNL, the only way to do masked operations is with 512-bit registers, so we would have to completely disable masking to obey the preference. We would also lose support for gather, scatter, ctlz, vXi64 multiplies, etc. This may change in the future, but it simplifies the initial implementation.
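
For illustration, a minimal sketch in the spirit of the prefer-avx256-mask-extend.ll test added below (illustrative IR, not taken verbatim from the tests):

    define <16 x i16> @ext(<16 x i1> %m) {
      %e = sext <16 x i1> %m to <16 x i16>
      ret <16 x i16> %e
    }

Compiled with llc -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+prefer-256-bit, the mask is now split into two <8 x i1> halves, each half is extended in a 256-bit register, and the results are concatenated, instead of sign extending the whole mask in a single 512-bit register.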

Differential Revision: https://reviews.llvm.org/D41895

Added:
    llvm/trunk/test/CodeGen/X86/prefer-avx256-lzcnt.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-extend.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-popcnt.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-shift.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-trunc.ll
    llvm/trunk/test/CodeGen/X86/prefer-avx256-wide-mul.ll
Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/lib/Target/X86/X86Subtarget.h

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=323016&r1=323015&r2=323016&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Jan 19 16:26:12 2018
@@ -14323,10 +14323,15 @@ static SDValue lower1BitVectorShuffle(co
     ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
     break;
   case MVT::v16i1:
-    ExtVT = MVT::v16i32;
+    // Take 512-bit type, unless we are avoiding 512-bit types and have the
+    // 256-bit operation available.
+    ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
     break;
   case MVT::v32i1:
-    ExtVT = MVT::v32i16;
+    // Take 512-bit type, unless we are avoiding 512-bit types and have the
+    // 256-bit operation available.
+    assert(Subtarget.hasBWI() && "Expected AVX512BW support");
+    ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
     break;
   case MVT::v64i1:
     ExtVT = MVT::v64i8;
@@ -16361,6 +16366,20 @@ static SDValue LowerAVXExtend(SDValue Op
   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
 }
 
+// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
+static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
+                                   const SDLoc &dl, SelectionDAG &DAG) {
+  assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
+  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
+                           DAG.getIntPtrConstant(0, dl));
+  SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
+                           DAG.getIntPtrConstant(8, dl));
+  Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
+  Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
+  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
+  return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
+}
+
 static  SDValue LowerZERO_EXTEND_Mask(SDValue Op,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
@@ -16374,8 +16393,13 @@ static  SDValue LowerZERO_EXTEND_Mask(SD
   // Extend VT if the element type is i8/i16 and BWI is not supported.
   MVT ExtVT = VT;
   if (!Subtarget.hasBWI() &&
-      (VT.getVectorElementType().getSizeInBits() <= 16))
+      (VT.getVectorElementType().getSizeInBits() <= 16)) {
+    // If v16i32 is to be avoided, we'll need to split and concatenate.
+    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
+      return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
+
     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
+  }
 
   // Widen to 512-bits if VLX is not supported.
   MVT WideVT = ExtVT;
@@ -16549,6 +16573,33 @@ static SDValue LowerTruncateVecI1(SDValu
     assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
            "Unexpected vector type.");
     unsigned NumElts = InVT.getVectorNumElements();
+    assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
+    // We need to change to a wider element type that we have support for.
+    // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
+    // For 16 element vectors we extend to v16i32 unless we are explicitly
+    // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
+    // we need to split into two 8 element vectors which we can extend to
+    // v8i32, truncate, and concat the results. There's an additional
+    // complication if the original type is v16i8: we can't split a v16i8
+    // directly, so we first pre-extend it to v16i16, split that into two
+    // v8i16 halves, extend each to v8i32, truncate each to v8i1, and concat
+    // the two halves.
+    if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
+      if (InVT == MVT::v16i8) {
+        // First we need to sign extend up to 256-bits so we can split that.
+        InVT = MVT::v16i16;
+        In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
+      }
+      SDValue Lo = extract128BitVector(In, 0, DAG, DL);
+      SDValue Hi = extract128BitVector(In, 8, DAG, DL);
+      // We're split now, just emit two truncates and a concat. The two
+      // truncates will trigger legalization to come back to this function.
+      Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
+      Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
+      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
+    }
+    // We either have 8 elements or we're allowed to use 512-bit vectors.
+    // If we have VLX, we want to use the narrowest vector that can get the
+    // job done so we use vXi32.
     MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
     MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
     In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
@@ -16580,10 +16631,15 @@ SDValue X86TargetLowering::LowerTRUNCATE
   // vpmovqb/w/d, vpmovdb/w, vpmovwb
   if (Subtarget.hasAVX512()) {
     // word to byte only under BWI
-    if (InVT == MVT::v16i16 && !Subtarget.hasBWI()) // v16i16 -> v16i8
-      return DAG.getNode(ISD::TRUNCATE, DL, VT,
-                         getExtendInVec(X86ISD::VSEXT, DL, MVT::v16i32, In, DAG));
-    return Op;
+    if (InVT == MVT::v16i16 && !Subtarget.hasBWI()) { // v16i16 -> v16i8
+      // Make sure we're allowed to promote to 512-bits.
+      if (Subtarget.canExtendTo512DQ())
+        return DAG.getNode(ISD::TRUNCATE, DL, VT,
+                           getExtendInVec(X86ISD::VSEXT, DL, MVT::v16i32, In,
+                                          DAG));
+    } else {
+      return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
+    }
   }
 
   // Truncate with PACKSS if we are truncating a vector with sign-bits that
@@ -18561,8 +18617,13 @@ static SDValue LowerSIGN_EXTEND_Mask(SDV
 
   // Extend VT if the element type is i8/i16 and BWI is not supported.
   MVT ExtVT = VT;
-  if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16)
+  if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
+    // If v16i32 is to be avoided, we'll need to split and concatenate.
+    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
+      return SplitAndExtendv16i1(ISD::SIGN_EXTEND, VT, In, dl, DAG);
+
     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
+  }
 
   // Widen to 512-bits if VLX is not supported.
   MVT WideVT = ExtVT;
@@ -21845,7 +21906,8 @@ static SDValue Lower512IntUnary(SDValue
 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
 // split the vector, perform the operation on its Lo and Hi parts and
 // concatenate the results.
-static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG) {
+static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
+                                         const X86Subtarget &Subtarget) {
   assert(Op.getOpcode() == ISD::CTLZ);
   SDLoc dl(Op);
   MVT VT = Op.getSimpleValueType();
@@ -21856,7 +21918,8 @@ static SDValue LowerVectorCTLZ_AVX512CDI
           "Unsupported element type");
 
   // Split vector, its Lo and Hi parts will be handled in the next iteration.
-  if (16 < NumElems)
+  if (NumElems > 16 ||
+      (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
     return LowerVectorIntUnary(Op, DAG);
 
   MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
@@ -21961,8 +22024,10 @@ static SDValue LowerVectorCTLZ(SDValue O
                                SelectionDAG &DAG) {
   MVT VT = Op.getSimpleValueType();
 
-  if (Subtarget.hasCDI())
-    return LowerVectorCTLZ_AVX512CDI(Op, DAG);
+  if (Subtarget.hasCDI() &&
+      // vXi8 vectors need 512-bit registers to be promoted to vXi32.
+      (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
+    return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
 
   // Decompose 256-bit ops into smaller 128-bit ops.
   if (VT.is256BitVector() && !Subtarget.hasInt256())
@@ -22378,7 +22443,7 @@ static SDValue LowerMULH(SDValue Op, con
     SDValue Hi = DAG.getIntPtrConstant(NumElems / 2, dl);
 
     if (VT == MVT::v32i8) {
-      if (Subtarget.hasBWI()) {
+      if (Subtarget.canExtendTo512BW()) {
         SDValue ExA = DAG.getNode(ExAVX, dl, MVT::v32i16, A);
         SDValue ExB = DAG.getNode(ExAVX, dl, MVT::v32i16, B);
         SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v32i16, ExA, ExB);
@@ -23167,10 +23232,12 @@ static SDValue LowerShift(SDValue Op, co
   // It's worth extending once and using the vXi16/vXi32 shifts for smaller
   // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
   // make the existing SSE solution better.
+  // NOTE: We honor preferred vector width before promoting to 512-bits.
   if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
-      (Subtarget.hasAVX512() && VT == MVT::v16i16) ||
-      (Subtarget.hasAVX512() && VT == MVT::v16i8) ||
-      (Subtarget.hasBWI() && VT == MVT::v32i8)) {
+      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
+      (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
+      (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
+      (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
     assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
            "Unexpected vector type");
     MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
@@ -23995,7 +24062,7 @@ static SDValue LowerVectorCTPOP(SDValue
     unsigned NumElems = VT.getVectorNumElements();
     assert((VT.getVectorElementType() == MVT::i8 ||
             VT.getVectorElementType() == MVT::i16) && "Unexpected type");
-    if (NumElems <= 16) {
+    if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
       MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
       Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
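
To make the LowerTruncateVecI1 change above concrete, here is a hypothetical input that takes the new split path (illustrative IR, not one of the committed tests):

    define <16 x i1> @trunc16(<16 x i8> %x) {
      %t = trunc <16 x i8> %x to <16 x i1>
      ret <16 x i1> %t
    }

With -mattr=+avx512vl,+prefer-256-bit the v16i8 source is first sign extended to v16i16 (a v16i8 can't be split directly), split into two v8i16 halves, each half is truncated to v8i1 (re-entering this lowering, where it legalizes via v8i32), and the halves are concatenated back into a v16i1.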

Modified: llvm/trunk/lib/Target/X86/X86Subtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Subtarget.h?rev=323016&r1=323015&r2=323016&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86Subtarget.h (original)
+++ llvm/trunk/lib/Target/X86/X86Subtarget.h Fri Jan 19 16:26:12 2018
@@ -597,6 +597,17 @@ public:
 
   unsigned getPreferVectorWidth() const { return PreferVectorWidth; }
 
+  // Helper functions to determine when we should allow widening to 512-bit
+  // during codegen.
+  // TODO: Currently we're always allowing widening on CPUs without VLX,
+  // because for many cases we don't have a better option.
+  bool canExtendTo512DQ() const {
+    return hasAVX512() && (!hasVLX() || getPreferVectorWidth() >= 512);
+  }
+  bool canExtendTo512BW() const {
+    return hasBWI() && canExtendTo512DQ();
+  }
+
   bool isXRaySupported() const override { return is64Bit(); }
 
   X86ProcFamilyEnum getProcFamily() const { return X86ProcFamily; }
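
A condensed sketch of how the lowering changes above consult these helpers when picking an extension type (variable names illustrative):

    // Use the 512-bit type when widening is allowed (no VLX, or the
    // preferred vector width is at least 512); otherwise stay at 256 bits.
    MVT ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;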

Added: llvm/trunk/test/CodeGen/X86/prefer-avx256-lzcnt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/prefer-avx256-lzcnt.ll?rev=323016&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/prefer-avx256-lzcnt.ll (added)
+++ llvm/trunk/test/CodeGen/X86/prefer-avx256-lzcnt.ll Fri Jan 19 16:26:12 2018
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512cd,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512cd,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512cd,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512cd,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+
+define <8 x i16> @testv8i16(<8 x i16> %in) {
+; AVX256-LABEL: testv8i16:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX256-NEXT:    vplzcntd %ymm0, %ymm0
+; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX256-NEXT:    vzeroupper
+; AVX256-NEXT:    retq
+;
+; AVX512VL-LABEL: testv8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-NEXT:    vplzcntd %ymm0, %ymm0
+; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512F-LABEL: testv8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-NEXT:    vplzcntd %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    vpsubw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+  %out = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %in, i1 false)
+  ret <8 x i16> %out
+}
+
+define <16 x i8> @testv16i8(<16 x i8> %in) {
+; AVX256-LABEL: testv16i8:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX256-NEXT:    vpand %xmm1, %xmm0, %xmm2
+; AVX256-NEXT:    vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX256-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX256-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX256-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX256-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX256-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm1
+; AVX256-NEXT:    vpand %xmm1, %xmm2, %xmm1
+; AVX256-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX256-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX256-NEXT:    retq
+;
+; AVX512-LABEL: testv16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT:    vplzcntd %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512-NEXT:    vpsubb {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %out = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %in, i1 false)
+  ret <16 x i8> %out
+}
+
+define <16 x i16> @testv16i16(<16 x i16> %in) {
+; AVX256-LABEL: testv16i16:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX256-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX256-NEXT:    vplzcntd %ymm1, %ymm1
+; AVX256-NEXT:    vpmovdw %ymm1, %xmm1
+; AVX256-NEXT:    vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
+; AVX256-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX256-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX256-NEXT:    vplzcntd %ymm0, %ymm0
+; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256-NEXT:    vpsubw %xmm2, %xmm0, %xmm0
+; AVX256-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX256-NEXT:    retq
+;
+; AVX512-LABEL: testv16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT:    vplzcntd %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512-NEXT:    vpsubw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %out = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %in, i1 false)
+  ret <16 x i16> %out
+}
+
+define <32 x i8> @testv32i8(<32 x i8> %in) {
+; AVX256-LABEL: testv32i8:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX256-NEXT:    vpand %ymm1, %ymm0, %ymm2
+; AVX256-NEXT:    vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
+; AVX256-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX256-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX256-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX256-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX256-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm1
+; AVX256-NEXT:    vpand %ymm1, %ymm2, %ymm1
+; AVX256-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX256-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX256-NEXT:    retq
+;
+; AVX512-LABEL: testv32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512-NEXT:    vplzcntd %zmm1, %zmm1
+; AVX512-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24]
+; AVX512-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT:    vplzcntd %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512-NEXT:    vpsubb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %out = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %in, i1 false)
+  ret <32 x i8> %out
+}
+
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1)
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)
+declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1)
+declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>, i1)
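
The CHECK lines in this and the other added tests are autogenerated by utils/update_llc_test_checks.py; any single configuration can be reproduced by hand with the corresponding RUN line, e.g.:

    llc < prefer-avx256-lzcnt.ll -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512cd,+prefer-256-bit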

Added: llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-extend.ll?rev=323016&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-extend.ll (added)
+++ llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-extend.ll Fri Jan 19 16:26:12 2018
@@ -0,0 +1,260 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+
+define <8 x i16> @testv8i1_sext_v8i16(<8 x i32>* %p) {
+; AVX256-LABEL: testv8i1_sext_v8i16:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX256-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX256-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256-NEXT:    vzeroupper
+; AVX256-NEXT:    retq
+;
+; AVX512VL-LABEL: testv8i1_sext_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX512VL-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512F-LABEL: testv8i1_sext_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT:    vpcmpeqd (%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+  %in = load <8 x i32>, <8 x i32>* %p
+  %cmp = icmp eq <8 x i32> %in, zeroinitializer
+  %ext = sext <8 x i1> %cmp to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define <16 x i8> @testv16i1_sext_v16i8(<8 x i32>* %p, <8 x i32>* %q) {
+; AVX256-LABEL: testv16i1_sext_v16i8:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX256-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX256-NEXT:    vpcmpeqd (%rsi), %ymm0, %k2
+; AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX256-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k2} {z}
+; AVX256-NEXT:    vpmovdw %ymm1, %xmm1
+; AVX256-NEXT:    vpacksswb %xmm0, %xmm1, %xmm1
+; AVX256-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
+; AVX256-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX256-NEXT:    vzeroupper
+; AVX256-NEXT:    retq
+;
+; AVX512VL-LABEL: testv16i1_sext_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpcmpeqd (%rdi), %ymm0, %k0
+; AVX512VL-NEXT:    vpcmpeqd (%rsi), %ymm0, %k1
+; AVX512VL-NEXT:    kunpckbw %k0, %k1, %k1
+; AVX512VL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512F-LABEL: testv16i1_sext_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX512F-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    kunpckbw %k0, %k1, %k1
+; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+  %in = load <8 x i32>, <8 x i32>* %p
+  %cmp = icmp eq <8 x i32> %in, zeroinitializer
+  %in2 = load <8 x i32>, <8 x i32>* %q
+  %cmp2 = icmp eq <8 x i32> %in2, zeroinitializer
+  %concat = shufflevector <8 x i1> %cmp, <8 x i1> %cmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %ext = sext <16 x i1> %concat to <16 x i8>
+  ret <16 x i8> %ext
+}
+
+define <16 x i16> @testv16i1_sext_v16i16(<8 x i32>* %p, <8 x i32>* %q) {
+; AVX256-LABEL: testv16i1_sext_v16i16:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX256-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX256-NEXT:    vpcmpeqd (%rsi), %ymm0, %k2
+; AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX256-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k1} {z}
+; AVX256-NEXT:    vpmovdw %ymm1, %xmm1
+; AVX256-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k2} {z}
+; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX256-NEXT:    retq
+;
+; AVX512VL-LABEL: testv16i1_sext_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpcmpeqd (%rdi), %ymm0, %k0
+; AVX512VL-NEXT:    vpcmpeqd (%rsi), %ymm0, %k1
+; AVX512VL-NEXT:    kunpckbw %k0, %k1, %k1
+; AVX512VL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512F-LABEL: testv16i1_sext_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX512F-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    kunpckbw %k0, %k1, %k1
+; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    retq
+  %in = load <8 x i32>, <8 x i32>* %p
+  %cmp = icmp eq <8 x i32> %in, zeroinitializer
+  %in2 = load <8 x i32>, <8 x i32>* %q
+  %cmp2 = icmp eq <8 x i32> %in2, zeroinitializer
+  %concat = shufflevector <8 x i1> %cmp, <8 x i1> %cmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %ext = sext <16 x i1> %concat to <16 x i16>
+  ret <16 x i16> %ext
+}
+
+define <8 x i16> @testv8i1_zext_v8i16(<8 x i32>* %p) {
+; AVX256-LABEL: testv8i1_zext_v8i16:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX256-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX256-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
+; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256-NEXT:    vzeroupper
+; AVX256-NEXT:    retq
+;
+; AVX512VL-LABEL: testv8i1_zext_v8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX512VL-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
+; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512F-LABEL: testv8i1_zext_v8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT:    vpcmpeqd (%rdi), %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $15, %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+  %in = load <8 x i32>, <8 x i32>* %p
+  %cmp = icmp eq <8 x i32> %in, zeroinitializer
+  %ext = zext <8 x i1> %cmp to <8 x i16>
+  ret <8 x i16> %ext
+}
+
+define <16 x i8> @testv16i1_zext_v16i8(<8 x i32>* %p, <8 x i32>* %q) {
+; AVX256-LABEL: testv16i1_zext_v16i8:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX256-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX256-NEXT:    vpcmpeqd (%rsi), %ymm0, %k2
+; AVX256-NEXT:    movl {{.*}}(%rip), %eax
+; AVX256-NEXT:    vpbroadcastd %eax, %ymm0 {%k2} {z}
+; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX256-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX256-NEXT:    vpbroadcastd %eax, %ymm2 {%k1} {z}
+; AVX256-NEXT:    vpmovdw %ymm2, %xmm2
+; AVX256-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
+; AVX256-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX256-NEXT:    vzeroupper
+; AVX256-NEXT:    retq
+;
+; AVX512VL-LABEL: testv16i1_zext_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpcmpeqd (%rdi), %ymm0, %k0
+; AVX512VL-NEXT:    vpcmpeqd (%rsi), %ymm0, %k1
+; AVX512VL-NEXT:    kunpckbw %k0, %k1, %k1
+; AVX512VL-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512F-LABEL: testv16i1_zext_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX512F-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    kunpckbw %k0, %k1, %k1
+; AVX512F-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+  %in = load <8 x i32>, <8 x i32>* %p
+  %cmp = icmp eq <8 x i32> %in, zeroinitializer
+  %in2 = load <8 x i32>, <8 x i32>* %q
+  %cmp2 = icmp eq <8 x i32> %in2, zeroinitializer
+  %concat = shufflevector <8 x i1> %cmp, <8 x i1> %cmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %ext = zext <16 x i1> %concat to <16 x i8>
+  ret <16 x i8> %ext
+}
+
+define <16 x i16> @testv16i1_zext_v16i16(<8 x i32>* %p, <8 x i32>* %q) {
+; AVX256-LABEL: testv16i1_zext_v16i16:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX256-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX256-NEXT:    vpcmpeqd (%rsi), %ymm0, %k2
+; AVX256-NEXT:    movl {{.*}}(%rip), %eax
+; AVX256-NEXT:    vpbroadcastd %eax, %ymm0 {%k1} {z}
+; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256-NEXT:    vpbroadcastd %eax, %ymm1 {%k2} {z}
+; AVX256-NEXT:    vpmovdw %ymm1, %xmm1
+; AVX256-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX256-NEXT:    retq
+;
+; AVX512VL-LABEL: testv16i1_zext_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpcmpeqd (%rdi), %ymm0, %k0
+; AVX512VL-NEXT:    vpcmpeqd (%rsi), %ymm0, %k1
+; AVX512VL-NEXT:    kunpckbw %k0, %k1, %k1
+; AVX512VL-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512F-LABEL: testv16i1_zext_v16i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX512F-NEXT:    vpcmpeqd %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    kunpckbw %k0, %k1, %k1
+; AVX512F-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    retq
+  %in = load <8 x i32>, <8 x i32>* %p
+  %cmp = icmp eq <8 x i32> %in, zeroinitializer
+  %in2 = load <8 x i32>, <8 x i32>* %q
+  %cmp2 = icmp eq <8 x i32> %in2, zeroinitializer
+  %concat = shufflevector <8 x i1> %cmp, <8 x i1> %cmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %ext = zext <16 x i1> %concat to <16 x i16>
+  ret <16 x i16> %ext
+}

Added: llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll?rev=323016&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll (added)
+++ llvm/trunk/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll Fri Jan 19 16:26:12 2018
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256 --check-prefix=AVX256VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512NOBW --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256 --check-prefix=AVX256VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512NOBW --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512NOBW --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+
+define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<8 x i32>* %a, <8 x i32>* %b) {
+; AVX256VL-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX256VL:       # %bb.0:
+; AVX256VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX256VL-NEXT:    vpcmpeqd (%rsi), %ymm0, %k2
+; AVX256VL-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k2} {z}
+; AVX256VL-NEXT:    vpmovdw %ymm1, %xmm1
+; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm2 {%k1} {z}
+; AVX256VL-NEXT:    vpmovdw %ymm2, %xmm2
+; AVX256VL-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4],xmm2[5,6,7]
+; AVX256VL-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[6,7,12,13,4,5,8,9,6,7,14,15,14,15,0,1]
+; AVX256VL-NEXT:    vpmovsxwd %xmm3, %ymm3
+; AVX256VL-NEXT:    vpslld $31, %ymm3, %ymm3
+; AVX256VL-NEXT:    vptestmd %ymm3, %ymm3, %k1
+; AVX256VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
+; AVX256VL-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[6,7,12,13,2,3,14,15,6,7,6,7,14,15,0,1]
+; AVX256VL-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4],xmm1[5],xmm2[6,7]
+; AVX256VL-NEXT:    vpmovsxwd %xmm1, %ymm1
+; AVX256VL-NEXT:    vpslld $31, %ymm1, %ymm1
+; AVX256VL-NEXT:    vptestmd %ymm1, %ymm1, %k0
+; AVX256VL-NEXT:    kunpckbw %k1, %k0, %k0
+; AVX256VL-NEXT:    kshiftrw $8, %k0, %k2
+; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k2} {z}
+; AVX256VL-NEXT:    vpmovdw %ymm1, %xmm1
+; AVX256VL-NEXT:    vpacksswb %xmm0, %xmm1, %xmm1
+; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX256VL-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256VL-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX256VL-NEXT:    vzeroupper
+; AVX256VL-NEXT:    retq
+;
+; AVX512VL-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd (%rsi), %ymm0, %k2
+; AVX512VL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; AVX512VL-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512VL-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [3,6,18,20,3,7,7,0,3,6,1,21,3,19,7,0]
+; AVX512VL-NEXT:    vpermi2d %zmm0, %zmm1, %zmm2
+; AVX512VL-NEXT:    vptestmd %zmm2, %zmm2, %k1
+; AVX512VL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX256VLBW-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX256VLBW:       # %bb.0:
+; AVX256VLBW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX256VLBW-NEXT:    vpcmpeqd (%rdi), %ymm0, %k0
+; AVX256VLBW-NEXT:    vpcmpeqd (%rsi), %ymm0, %k1
+; AVX256VLBW-NEXT:    vpmovm2w %k1, %ymm0
+; AVX256VLBW-NEXT:    vpmovm2w %k0, %ymm1
+; AVX256VLBW-NEXT:    vmovdqa {{.*#+}} ymm2 = [3,6,18,20,3,7,7,0,3,6,1,21,3,19,7,0]
+; AVX256VLBW-NEXT:    vpermi2w %ymm0, %ymm1, %ymm2
+; AVX256VLBW-NEXT:    vpmovw2m %ymm2, %k0
+; AVX256VLBW-NEXT:    vpmovm2b %k0, %xmm0
+; AVX256VLBW-NEXT:    vzeroupper
+; AVX256VLBW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpcmpeqd (%rdi), %ymm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd (%rsi), %ymm0, %k2
+; AVX512VLBW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; AVX512VLBW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [3,6,18,20,3,7,7,0,3,6,1,21,3,19,7,0]
+; AVX512VLBW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm2
+; AVX512VLBW-NEXT:    vptestmd %zmm2, %zmm2, %k0
+; AVX512VLBW-NEXT:    vpmovm2b %k0, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512F-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512F-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %zmm2, %zmm1, %k2
+; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; AVX512F-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [3,6,18,20,3,7,7,0,3,6,1,21,3,19,7,0]
+; AVX512F-NEXT:    vpermi2d %zmm0, %zmm1, %zmm2
+; AVX512F-NEXT:    vptestmd %zmm2, %zmm2, %k1
+; AVX512F-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512BW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT:    vpcmpeqd %zmm2, %zmm0, %k1
+; AVX512BW-NEXT:    vpcmpeqd %zmm2, %zmm1, %k2
+; AVX512BW-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; AVX512BW-NEXT:    vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [3,6,18,20,3,7,7,0,3,6,1,21,3,19,7,0]
+; AVX512BW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT:    vptestmd %zmm2, %zmm2, %k0
+; AVX512BW-NEXT:    vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT:    # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+
+  %a1 = load <8 x i32>, <8 x i32>* %a
+  %b1 = load <8 x i32>, <8 x i32>* %b
+  %a2 = icmp eq <8 x i32> %a1, zeroinitializer
+  %b2 = icmp eq <8 x i32> %b1, zeroinitializer
+  %c = shufflevector <8 x i1> %a2, <8 x i1> %b2, <16 x i32> <i32 3, i32 6, i32 10, i32 12, i32 3, i32 7, i32 7, i32 0, i32 3, i32 6, i32 1, i32 13, i32 3, i32 11, i32 7, i32 0>
+  ret <16 x i1> %c
+}
+
+define <32 x i1> @shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<32 x i8> %a) {
+; AVX256VL-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX256VL:       # %bb.0:
+; AVX256VL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX256VL-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[3,6,u,12,3,7,7,0,3,6,1,13,3,u,7,0,u,u,22,u,u,u,u,u,u,u,u,u,u,21,u,u]
+; AVX256VL-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX256VL-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,6,u,u,u,u,u,u,u,u,u,u,5,u,u,19,22,u,28,19,23,23,16,19,22,17,29,19,u,23,16]
+; AVX256VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,0,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255,0,0,0,0,0,0,0,0,0,0,255,0,0]
+; AVX256VL-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX256VL-NEXT:    retq
+;
+; AVX512NOBW-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX512NOBW:       # %bb.0:
+; AVX512NOBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512NOBW-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
+; AVX512NOBW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[3,6,u,12,3,7,7,0,3,6,1,13,3,u,7,0,u,u,22,u,u,u,u,u,u,u,u,u,u,21,u,u]
+; AVX512NOBW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX512NOBW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,6,u,u,u,u,u,u,u,u,u,u,5,u,u,19,22,u,28,19,23,23,16,19,22,17,29,19,u,23,16]
+; AVX512NOBW-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,0,255,255,255,255,255,255,255,255,255,255,0,255,255,0,0,255,0,0,0,0,0,0,0,0,0,0,255,0,0]
+; AVX512NOBW-NEXT:    vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
+; AVX512NOBW-NEXT:    retq
+;
+; AVX256VLBW-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX256VLBW:       # %bb.0:
+; AVX256VLBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX256VLBW-NEXT:    vpcmpeqb %ymm1, %ymm0, %k0
+; AVX256VLBW-NEXT:    vpmovm2b %k0, %ymm0
+; AVX256VLBW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX256VLBW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[3,6,u,12,3,7,7,0,3,6,1,13,3,u,7,0,u,u,22,u,u,u,u,u,u,u,u,u,u,21,u,u]
+; AVX256VLBW-NEXT:    movl $-537190396, %eax # imm = 0xDFFB2004
+; AVX256VLBW-NEXT:    kmovd %eax, %k1
+; AVX256VLBW-NEXT:    vpshufb {{.*#+}} ymm0 {%k1} = ymm1[u,u,6,u,u,u,u,u,u,u,u,u,u,5,u,u,19,22,u,28,19,23,23,16,19,22,17,29,19,u,23,16]
+; AVX256VLBW-NEXT:    vpmovb2m %ymm0, %k0
+; AVX256VLBW-NEXT:    vpmovm2b %k0, %ymm0
+; AVX256VLBW-NEXT:    retq
+;
+; AVX512VLBW-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpcmpeqb %ymm1, %ymm0, %k0
+; AVX512VLBW-NEXT:    vpmovm2w %k0, %zmm0
+; AVX512VLBW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0,3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
+; AVX512VLBW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
+; AVX512VLBW-NEXT:    vpmovw2m %zmm0, %k0
+; AVX512VLBW-NEXT:    vpmovm2b %k0, %ymm0
+; AVX512VLBW-NEXT:    retq
+;
+; AVX512BW-LABEL: shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpcmpeqb %zmm1, %zmm0, %k0
+; AVX512BW-NEXT:    vpmovm2w %k0, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0,3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
+; AVX512BW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vpmovw2m %zmm0, %k0
+; AVX512BW-NEXT:    vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT:    # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT:    retq
+  %cmp = icmp eq <32 x i8> %a, zeroinitializer
+  %b = shufflevector <32 x i1> %cmp, <32 x i1> undef, <32 x i32> <i32 3, i32 6, i32 22, i32 12, i32 3, i32 7, i32 7, i32 0, i32 3, i32 6, i32 1, i32 13, i32 3, i32 21, i32 7, i32 0, i32 3, i32 6, i32 22, i32 12, i32 3, i32 7, i32 7, i32 0, i32 3, i32 6, i32 1, i32 13, i32 3, i32 21, i32 7, i32 0>
+  ret <32 x i1> %b
+}
+

Added: llvm/trunk/test/CodeGen/X86/prefer-avx256-popcnt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/prefer-avx256-popcnt.ll?rev=323016&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/prefer-avx256-popcnt.ll (added)
+++ llvm/trunk/test/CodeGen/X86/prefer-avx256-popcnt.ll Fri Jan 19 16:26:12 2018
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512vpopcntdq,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512vpopcntdq,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vpopcntdq,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vpopcntdq,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512F
+
+define <8 x i16> @testv8i16(<8 x i16> %in) {
+; AVX256-LABEL: testv8i16:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX256-NEXT:    vpopcntd %ymm0, %ymm0
+; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX256-NEXT:    vzeroupper
+; AVX256-NEXT:    retq
+;
+; AVX512VL-LABEL: testv8i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512VL-NEXT:    vpopcntd %ymm0, %ymm0
+; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512F-LABEL: testv8i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512F-NEXT:    vpopcntd %zmm0, %zmm0
+; AVX512F-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512F-NEXT:    # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+  %out = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %in)
+  ret <8 x i16> %out
+}
+
+define <16 x i8> @testv16i8(<16 x i8> %in) {
+; AVX256-LABEL: testv16i8:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX256-NEXT:    vpand %xmm1, %xmm0, %xmm2
+; AVX256-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX256-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
+; AVX256-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX256-NEXT:    vpand %xmm1, %xmm0, %xmm0
+; AVX256-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX256-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX256-NEXT:    retq
+;
+; AVX512-LABEL: testv16i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT:    vpopcntd %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    retq
+  %out = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %in)
+  ret <16 x i8> %out
+}
+
+define <16 x i16> @testv16i16(<16 x i16> %in) {
+; AVX256-LABEL: testv16i16:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX256-NEXT:    vpand %ymm1, %ymm0, %ymm2
+; AVX256-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; AVX256-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; AVX256-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX256-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX256-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX256-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
+; AVX256-NEXT:    vpsllw $8, %ymm0, %ymm1
+; AVX256-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX256-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX256-NEXT:    retq
+;
+; AVX512-LABEL: testv16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT:    vpopcntd %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512-NEXT:    retq
+  %out = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %in)
+  ret <16 x i16> %out
+}
+
+define <32 x i8> @testv32i8(<32 x i8> %in) {
+; CHECK-LABEL: testv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; CHECK-NEXT:    vpand %ymm1, %ymm0, %ymm2
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
+; CHECK-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
+; CHECK-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; CHECK-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; CHECK-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    retq
+  %out = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %in)
+  ret <32 x i8> %out
+}
+
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>)
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)
+declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
+declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)

Added: llvm/trunk/test/CodeGen/X86/prefer-avx256-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/prefer-avx256-shift.ll?rev=323016&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/prefer-avx256-shift.ll (added)
+++ llvm/trunk/test/CodeGen/X86/prefer-avx256-shift.ll Fri Jan 19 16:26:12 2018
@@ -0,0 +1,475 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+prefer-256-bit | FileCheck %s --check-prefix=ALL --check-prefix=AVX256 --check-prefix=AVX256BW --check-prefix=AVX256BWVL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,-prefer-256-bit | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW --check-prefix=AVX512BWVL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+prefer-256-bit | FileCheck %s --check-prefix=ALL --check-prefix=AVX256 --check-prefix=AVX256VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,-prefer-256-bit | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+prefer-256-bit | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW --check-prefix=AVX512BWNOVL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,-prefer-256-bit | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW --check-prefix=AVX512BWNOVL
+
+define <32 x i8> @var_shl_v32i8(<32 x i8> %a, <32 x i8> %b) {
+; AVX256-LABEL: var_shl_v32i8:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX256-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX256-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX256-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX256-NEXT:    vpsllw $2, %ymm0, %ymm2
+; AVX256-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX256-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX256-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX256-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX256-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX256-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX256-NEXT:    retq
+;
+; AVX512BW-LABEL: var_shl_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: var_shl_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+  %shift = shl <32 x i8> %a, %b
+  ret <32 x i8> %shift
+}
+
+define <16 x i16> @var_shl_v16i16(<16 x i16> %a, <16 x i16> %b) {
+; AVX256BW-LABEL: var_shl_v16i16:
+; AVX256BW:       # %bb.0:
+; AVX256BW-NEXT:    vpsllvw %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT:    retq
+;
+; AVX512BWVL-LABEL: var_shl_v16i16:
+; AVX512BWVL:       # %bb.0:
+; AVX512BWVL-NEXT:    vpsllvw %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT:    retq
+;
+; AVX256VL-LABEL: var_shl_v16i16:
+; AVX256VL:       # %bb.0:
+; AVX256VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX256VL-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; AVX256VL-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
+; AVX256VL-NEXT:    vpsllvd %ymm3, %ymm4, %ymm3
+; AVX256VL-NEXT:    vpsrld $16, %ymm3, %ymm3
+; AVX256VL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; AVX256VL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX256VL-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
+; AVX256VL-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX256VL-NEXT:    vpackusdw %ymm3, %ymm0, %ymm0
+; AVX256VL-NEXT:    retq
+;
+; AVX512VL-LABEL: var_shl_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BWNOVL-LABEL: var_shl_v16i16:
+; AVX512BWNOVL:       # %bb.0:
+; AVX512BWNOVL-NEXT:    # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BWNOVL-NEXT:    # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BWNOVL-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BWNOVL-NEXT:    # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BWNOVL-NEXT:    retq
+  %shift = shl <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i8> @var_shl_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; AVX256BW-LABEL: var_shl_v16i8:
+; AVX256BW:       # %bb.0:
+; AVX256BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX256BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX256BW-NEXT:    vpsllvw %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX256BW-NEXT:    vzeroupper
+; AVX256BW-NEXT:    retq
+;
+; AVX512BWVL-LABEL: var_shl_v16i8:
+; AVX512BWVL:       # %bb.0:
+; AVX512BWVL-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BWVL-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BWVL-NEXT:    vpsllvw %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512BWVL-NEXT:    vzeroupper
+; AVX512BWVL-NEXT:    retq
+;
+; AVX256VL-LABEL: var_shl_v16i8:
+; AVX256VL:       # %bb.0:
+; AVX256VL-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpsllw $4, %xmm0, %xmm2
+; AVX256VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX256VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpsllw $2, %xmm0, %xmm2
+; AVX256VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX256VL-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; AVX256VL-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT:    retq
+;
+; AVX512VL-LABEL: var_shl_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BWNOVL-LABEL: var_shl_v16i8:
+; AVX512BWNOVL:       # %bb.0:
+; AVX512BWNOVL-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BWNOVL-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BWNOVL-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BWNOVL-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BWNOVL-NEXT:    # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BWNOVL-NEXT:    vzeroupper
+; AVX512BWNOVL-NEXT:    retq
+  %shift = shl <16 x i8> %a, %b
+  ret <16 x i8> %shift
+}
+
+define <32 x i8> @var_lshr_v32i8(<32 x i8> %a, <32 x i8> %b) {
+; AVX256-LABEL: var_lshr_v32i8:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX256-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX256-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX256-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX256-NEXT:    vpsrlw $2, %ymm0, %ymm2
+; AVX256-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX256-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX256-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX256-NEXT:    vpsrlw $1, %ymm0, %ymm2
+; AVX256-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX256-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX256-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX256-NEXT:    retq
+;
+; AVX512BW-LABEL: var_lshr_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: var_lshr_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $1, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+  %shift = lshr <32 x i8> %a, %b
+  ret <32 x i8> %shift
+}
+
+define <16 x i16> @var_lshr_v16i16(<16 x i16> %a, <16 x i16> %b) {
+; AVX256BW-LABEL: var_lshr_v16i16:
+; AVX256BW:       # %bb.0:
+; AVX256BW-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT:    retq
+;
+; AVX512BWVL-LABEL: var_lshr_v16i16:
+; AVX512BWVL:       # %bb.0:
+; AVX512BWVL-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT:    retq
+;
+; AVX256VL-LABEL: var_lshr_v16i16:
+; AVX256VL:       # %bb.0:
+; AVX256VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX256VL-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; AVX256VL-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
+; AVX256VL-NEXT:    vpsrlvd %ymm3, %ymm4, %ymm3
+; AVX256VL-NEXT:    vpsrld $16, %ymm3, %ymm3
+; AVX256VL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; AVX256VL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX256VL-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX256VL-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX256VL-NEXT:    vpackusdw %ymm3, %ymm0, %ymm0
+; AVX256VL-NEXT:    retq
+;
+; AVX512VL-LABEL: var_lshr_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BWNOVL-LABEL: var_lshr_v16i16:
+; AVX512BWNOVL:       # %bb.0:
+; AVX512BWNOVL-NEXT:    # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BWNOVL-NEXT:    # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BWNOVL-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BWNOVL-NEXT:    # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BWNOVL-NEXT:    retq
+  %shift = lshr <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i8> @var_lshr_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; AVX256BW-LABEL: var_lshr_v16i8:
+; AVX256BW:       # %bb.0:
+; AVX256BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX256BW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX256BW-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX256BW-NEXT:    vzeroupper
+; AVX256BW-NEXT:    retq
+;
+; AVX512BWVL-LABEL: var_lshr_v16i8:
+; AVX512BWVL:       # %bb.0:
+; AVX512BWVL-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BWVL-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BWVL-NEXT:    vpsrlvw %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512BWVL-NEXT:    vzeroupper
+; AVX512BWVL-NEXT:    retq
+;
+; AVX256VL-LABEL: var_lshr_v16i8:
+; AVX256VL:       # %bb.0:
+; AVX256VL-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpsrlw $4, %xmm0, %xmm2
+; AVX256VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX256VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpsrlw $2, %xmm0, %xmm2
+; AVX256VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX256VL-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpsrlw $1, %xmm0, %xmm2
+; AVX256VL-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX256VL-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT:    retq
+;
+; AVX512VL-LABEL: var_lshr_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512VL-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BWNOVL-LABEL: var_lshr_v16i8:
+; AVX512BWNOVL:       # %bb.0:
+; AVX512BWNOVL-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BWNOVL-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BWNOVL-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BWNOVL-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BWNOVL-NEXT:    # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BWNOVL-NEXT:    vzeroupper
+; AVX512BWNOVL-NEXT:    retq
+  %shift = lshr <16 x i8> %a, %b
+  ret <16 x i8> %shift
+}
+
+define <32 x i8> @var_ashr_v32i8(<32 x i8> %a, <32 x i8> %b) {
+; AVX256-LABEL: var_ashr_v32i8:
+; AVX256:       # %bb.0:
+; AVX256-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX256-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX256-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX256-NEXT:    vpsraw $4, %ymm3, %ymm4
+; AVX256-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX256-NEXT:    vpsraw $2, %ymm3, %ymm4
+; AVX256-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX256-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX256-NEXT:    vpsraw $1, %ymm3, %ymm4
+; AVX256-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX256-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX256-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX256-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX256-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX256-NEXT:    vpsraw $4, %ymm0, %ymm3
+; AVX256-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX256-NEXT:    vpsraw $2, %ymm0, %ymm3
+; AVX256-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX256-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX256-NEXT:    vpsraw $1, %ymm0, %ymm3
+; AVX256-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX256-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX256-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX256-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
+; AVX256-NEXT:    retq
+;
+; AVX512BW-LABEL: var_ashr_v32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT:    vpmovsxbw %ymm0, %zmm0
+; AVX512BW-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512VL-LABEL: var_ashr_v32i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512VL-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-NEXT:    vpsraw $4, %ymm3, %ymm4
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsraw $2, %ymm3, %ymm4
+; AVX512VL-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsraw $1, %ymm3, %ymm4
+; AVX512VL-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512VL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-NEXT:    vpsraw $4, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsraw $2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsraw $1, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+  %shift = ashr <32 x i8> %a, %b
+  ret <32 x i8> %shift
+}
+
+define <16 x i16> @var_ashr_v16i16(<16 x i16> %a, <16 x i16> %b) {
+; AVX256BW-LABEL: var_ashr_v16i16:
+; AVX256BW:       # %bb.0:
+; AVX256BW-NEXT:    vpsravw %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT:    retq
+;
+; AVX512BWVL-LABEL: var_ashr_v16i16:
+; AVX512BWVL:       # %bb.0:
+; AVX512BWVL-NEXT:    vpsravw %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT:    retq
+;
+; AVX256VL-LABEL: var_ashr_v16i16:
+; AVX256VL:       # %bb.0:
+; AVX256VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX256VL-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
+; AVX256VL-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
+; AVX256VL-NEXT:    vpsravd %ymm3, %ymm4, %ymm3
+; AVX256VL-NEXT:    vpsrld $16, %ymm3, %ymm3
+; AVX256VL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
+; AVX256VL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX256VL-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
+; AVX256VL-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX256VL-NEXT:    vpackusdw %ymm3, %ymm0, %ymm0
+; AVX256VL-NEXT:    retq
+;
+; AVX512VL-LABEL: var_ashr_v16i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512VL-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BWNOVL-LABEL: var_ashr_v16i16:
+; AVX512BWNOVL:       # %bb.0:
+; AVX512BWNOVL-NEXT:    # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BWNOVL-NEXT:    # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BWNOVL-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BWNOVL-NEXT:    # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BWNOVL-NEXT:    retq
+  %shift = ashr <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i8> @var_ashr_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; AVX256BW-LABEL: var_ashr_v16i8:
+; AVX256BW:       # %bb.0:
+; AVX256BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX256BW-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX256BW-NEXT:    vpsravw %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX256BW-NEXT:    vzeroupper
+; AVX256BW-NEXT:    retq
+;
+; AVX512BWVL-LABEL: var_ashr_v16i8:
+; AVX512BWVL:       # %bb.0:
+; AVX512BWVL-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BWVL-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512BWVL-NEXT:    vpsravw %ymm1, %ymm0, %ymm0
+; AVX512BWVL-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512BWVL-NEXT:    vzeroupper
+; AVX512BWVL-NEXT:    retq
+;
+; AVX256VL-LABEL: var_ashr_v16i8:
+; AVX256VL:       # %bb.0:
+; AVX256VL-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX256VL-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX256VL-NEXT:    vpsraw $4, %xmm3, %xmm4
+; AVX256VL-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX256VL-NEXT:    vpsraw $2, %xmm3, %xmm4
+; AVX256VL-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
+; AVX256VL-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX256VL-NEXT:    vpsraw $1, %xmm3, %xmm4
+; AVX256VL-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
+; AVX256VL-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
+; AVX256VL-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX256VL-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX256VL-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX256VL-NEXT:    vpsraw $4, %xmm0, %xmm3
+; AVX256VL-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpsraw $2, %xmm0, %xmm3
+; AVX256VL-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpsraw $1, %xmm0, %xmm3
+; AVX256VL-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
+; AVX256VL-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpsrlw $8, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX256VL-NEXT:    retq
+;
+; AVX512VL-LABEL: var_ashr_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512VL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; AVX512VL-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BWNOVL-LABEL: var_ashr_v16i8:
+; AVX512BWNOVL:       # %bb.0:
+; AVX512BWNOVL-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BWNOVL-NEXT:    vpmovsxbw %xmm0, %ymm0
+; AVX512BWNOVL-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BWNOVL-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BWNOVL-NEXT:    # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BWNOVL-NEXT:    vzeroupper
+; AVX512BWNOVL-NEXT:    retq
+  %shift = ashr <16 x i8> %a, %b
+  ret <16 x i8> %shift
+}

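The shift tests above all follow one promotion pattern: when the target has no
native variable per-byte shift, both operands are extended to the next element
width, shifted with vpsllvw/vpsrlvw/vpsravw (zmm, or ymm when the 256-bit
preference is obeyed), and truncated back with vpmovwb. For ashr the value is
sign-extended (vpmovsxbw) while the shift amount is still zero-extended. A
minimal IR sketch of the left-shift case; the function name is illustrative
and not part of the patch:

define <16 x i8> @shl_v16i8_promoted(<16 x i8> %a, <16 x i8> %b) {
  ; Widen both operands so a native vXi16 variable shift can be used.
  %aw = zext <16 x i8> %a to <16 x i16>
  %bw = zext <16 x i8> %b to <16 x i16>
  ; A single vpsllvw under AVX512BW.
  %sw = shl <16 x i16> %aw, %bw
  ; Narrow back to bytes (vpmovwb).
  %r = trunc <16 x i16> %sw to <16 x i8>
  ret <16 x i8> %r
}
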
Added: llvm/trunk/test/CodeGen/X86/prefer-avx256-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/prefer-avx256-trunc.ll?rev=323016&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/prefer-avx256-trunc.ll (added)
+++ llvm/trunk/test/CodeGen/X86/prefer-avx256-trunc.ll Fri Jan 19 16:26:12 2018
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256 --check-prefix=AVX256NOBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512NOBW --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512NOBW --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512NOBW --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256 --check-prefix=AVX256BWVL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256 --check-prefix=AVX512BWVL
+
+define <16 x i8> @testv16i16_trunc_v16i8(<16 x i16> %x) {
+; AVX256NOBW-LABEL: testv16i16_trunc_v16i8:
+; AVX256NOBW:       # %bb.0:
+; AVX256NOBW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX256NOBW-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX256NOBW-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX256NOBW-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX256NOBW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX256NOBW-NEXT:    vzeroupper
+; AVX256NOBW-NEXT:    retq
+;
+; AVX512NOBW-LABEL: testv16i16_trunc_v16i8:
+; AVX512NOBW:       # %bb.0:
+; AVX512NOBW-NEXT:    vpmovsxwd %ymm0, %zmm0
+; AVX512NOBW-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512NOBW-NEXT:    vzeroupper
+; AVX512NOBW-NEXT:    retq
+;
+; AVX512BW-LABEL: testv16i16_trunc_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT:    # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX256BWVL-LABEL: testv16i16_trunc_v16i8:
+; AVX256BWVL:       # %bb.0:
+; AVX256BWVL-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX256BWVL-NEXT:    vzeroupper
+; AVX256BWVL-NEXT:    retq
+;
+; AVX512BWVL-LABEL: testv16i16_trunc_v16i8:
+; AVX512BWVL:       # %bb.0:
+; AVX512BWVL-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512BWVL-NEXT:    vzeroupper
+; AVX512BWVL-NEXT:    retq
+  %trunc = trunc <16 x i16> %x to <16 x i8>
+  ret <16 x i8> %trunc
+}

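With AVX512BW and VLX the v16i16 -> v16i8 truncate above is a single ymm
vpmovwb; without BW the 512-bit path detours through v16i32 so vpmovdb can do
the narrowing. A minimal IR sketch of that detour, equivalent to the plain
trunc since the extended bits are discarded (the function name is
illustrative):

define <16 x i8> @trunc_via_v16i32(<16 x i16> %x) {
  ; vpmovsxwd: widen to dwords in a zmm register.
  %w = sext <16 x i16> %x to <16 x i32>
  ; vpmovdb: truncate dwords straight to bytes.
  %r = trunc <16 x i32> %w to <16 x i8>
  ret <16 x i8> %r
}
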
Added: llvm/trunk/test/CodeGen/X86/prefer-avx256-wide-mul.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/prefer-avx256-wide-mul.ll?rev=323016&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/prefer-avx256-wide-mul.ll (added)
+++ llvm/trunk/test/CodeGen/X86/prefer-avx256-wide-mul.ll Fri Jan 19 16:26:12 2018
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX256BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,-prefer-256-bit | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512BW
+
+define <32 x i8> @test_div7_32i8(<32 x i8> %a) {
+; AVX256BW-LABEL: test_div7_32i8:
+; AVX256BW:       # %bb.0:
+; AVX256BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX256BW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX256BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX256BW-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX256BW-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX256BW-NEXT:    vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX256BW-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
+; AVX256BW-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX256BW-NEXT:    vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3]
+; AVX256BW-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX256BW-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX256BW-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX256BW-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX256BW-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX256BW-NEXT:    vpsrlw $2, %ymm0, %ymm0
+; AVX256BW-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX256BW-NEXT:    retq
+;
+; AVX512BW-LABEL: test_div7_32i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT:    vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpsrlw $2, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT:    retq
+  %res = udiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  ret <32 x i8> %res
+}

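The div-by-7 expansion above is the usual unsigned magic-number recipe:
multiply by 37 in a widened type, keep the high byte, then apply a
subtract/half-add fix-up before the final shift by 2. A scalar IR model of
the same arithmetic (the function name is illustrative; the constant 37
matches the vpmullw splat in the checks):

define i8 @udiv7_scalar(i8 %x) {
  ; vpmovzxbw + vpmullw + vpsrlw $8: high byte of x * 37.
  %xw = zext i8 %x to i16
  %m = mul i16 %xw, 37
  %hi = lshr i16 %m, 8
  %t = trunc i16 %hi to i8
  ; vpsubb / vpsrlw $1 / vpaddb / vpsrlw $2: (t + ((x - t) >> 1)) >> 2.
  %d = sub i8 %x, %t
  %d1 = lshr i8 %d, 1
  %s = add i8 %t, %d1
  %r = lshr i8 %s, 2
  ret i8 %r
}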