[llvm] r256324 - AVX512BW: Enable packed word shifts for 512-bit vectors. Enable lowering of scalar immediate shifts for v64i8. Fix predicates for AVX1/2 shifts.

Igor Breger via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 23 00:06:51 PST 2015


Author: ibreger
Date: Wed Dec 23 02:06:50 2015
New Revision: 256324

URL: http://llvm.org/viewvc/llvm-project?rev=256324&view=rev
Log:
AVX512BW: Enable packed word shifts for 512-bit vectors. Enable lowering of scalar immediate shifts for v64i8. Fix predicates for AVX1/2 shifts.

Differential Revision: http://reviews.llvm.org/D15713

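As a quick illustration of the headline change (a sketch, not one of the committed tests): with +avx512bw, a variable v32i16 arithmetic shift now selects the 512-bit word-shift instruction directly, as the updated vector-shift-ashr-512.ll checks below confirm.

define <32 x i16> @example_ashr_v32i16(<32 x i16> %a, <32 x i16> %b) {
  ; expected to lower to a single: vpsravw %zmm1, %zmm0, %zmm0
  %shift = ashr <32 x i16> %a, %b
  ret <32 x i16> %shift
}
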
Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/lib/Target/X86/X86InstrAVX512.td
    llvm/trunk/lib/Target/X86/X86InstrSSE.td
    llvm/trunk/test/CodeGen/X86/avx-isa-check.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
    llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Dec 23 02:06:50 2015
@@ -1689,6 +1689,9 @@ X86TargetLowering::X86TargetLowering(con
     for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
       setOperationAction(ISD::BUILD_VECTOR,        VT, Custom);
       setOperationAction(ISD::VSELECT,             VT, Legal);
+      setOperationAction(ISD::SRL,                 VT, Custom);
+      setOperationAction(ISD::SHL,                 VT, Custom);
+      setOperationAction(ISD::SRA,                 VT, Custom);
 
       setOperationAction(ISD::AND,    VT, Promote);
       AddPromotedToType (ISD::AND,    VT, MVT::v8i64);
@@ -18358,7 +18361,9 @@ static SDValue LowerScalarImmediateShift
           Op.getOpcode() == ISD::SRA && !Subtarget->hasXOP())
         return ArithmeticShiftRight64(ShiftAmt);
 
-      if (VT == MVT::v16i8 || (Subtarget->hasInt256() && VT == MVT::v32i8)) {
+      if (VT == MVT::v16i8 ||
+          (Subtarget->hasInt256() && VT == MVT::v32i8) ||
+          VT == MVT::v64i8) {
         unsigned NumElts = VT.getVectorNumElements();
         MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
 
@@ -18382,10 +18387,8 @@ static SDValue LowerScalarImmediateShift
                                                    R, ShiftAmt, DAG);
           SHL = DAG.getBitcast(VT, SHL);
           // Zero out the rightmost bits.
-          SmallVector<SDValue, 32> V(
-              NumElts, DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, MVT::i8));
           return DAG.getNode(ISD::AND, dl, VT, SHL,
-                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
+                             DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT));
         }
         if (Op.getOpcode() == ISD::SRL) {
           // Make a large shift.
@@ -18393,18 +18396,14 @@ static SDValue LowerScalarImmediateShift
                                                    R, ShiftAmt, DAG);
           SRL = DAG.getBitcast(VT, SRL);
           // Zero out the leftmost bits.
-          SmallVector<SDValue, 32> V(
-              NumElts, DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, MVT::i8));
           return DAG.getNode(ISD::AND, dl, VT, SRL,
-                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
+                             DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
         }
         if (Op.getOpcode() == ISD::SRA) {
           // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
           SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
-          SmallVector<SDValue, 32> V(NumElts,
-                                     DAG.getConstant(128 >> ShiftAmt, dl,
-                                                     MVT::i8));
-          SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
+
+          SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
           Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
           Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
           return Res;

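For reference, the ashr identity used in the last hunk expands as follows for i8 elements: with Mask = 128 >> ShiftAmt, ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask). A minimal scalar sketch in LLVM IR (illustrative only; the lowering above operates on whole vectors):

; Amt = 3, so Mask = 128 >> 3 = 16. For R = -8 (0xF8):
;   lshr: 0xF8 >> 3   = 0x1F (31)
;   xor:  0x1F ^ 0x10 = 0x0F (15)
;   sub:  15 - 16     = -1 (0xFF), which matches ashr i8 -8, 3.
define i8 @ashr_via_lshr_sketch(i8 %r) {
  %srl  = lshr i8 %r, 3
  %flip = xor i8 %srl, 16
  %res  = sub i8 %flip, 16
  ret i8 %res
}
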
Modified: llvm/trunk/lib/Target/X86/X86InstrAVX512.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrAVX512.td?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrAVX512.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrAVX512.td Wed Dec 23 02:06:50 2015
@@ -4152,6 +4152,27 @@ multiclass avx512_var_shift_types<bits<8
                                  avx512vl_i64_info>, VEX_W;
 }
 
+// Use the 512-bit version to implement the 128/256-bit word shifts in the NoVLX case.
+multiclass avx512_var_shift_w_lowering<AVX512VLVectorVTInfo _, SDNode OpNode> {
+  let Predicates = [HasBWI, NoVLX] in {
+  def : Pat<(_.info256.VT (OpNode (_.info256.VT _.info256.RC:$src1), 
+                                  (_.info256.VT _.info256.RC:$src2))),
+            (EXTRACT_SUBREG                
+                (!cast<Instruction>(NAME#"WZrr")
+                    (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src1, sub_ymm),
+                    (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR256X:$src2, sub_ymm)),
+             sub_ymm)>;
+
+  def : Pat<(_.info128.VT (OpNode (_.info128.VT _.info128.RC:$src1), 
+                                  (_.info128.VT _.info128.RC:$src2))),
+            (EXTRACT_SUBREG                
+                (!cast<Instruction>(NAME#"WZrr")
+                    (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src1, sub_xmm),
+                    (INSERT_SUBREG (_.info512.VT (IMPLICIT_DEF)), VR128X:$src2, sub_xmm)),
+             sub_xmm)>;
+  }
+}
+
 multiclass avx512_var_shift_w<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode> {
   let Predicates = [HasBWI] in
@@ -4167,11 +4188,14 @@ multiclass avx512_var_shift_w<bits<8> op
 }
 
 defm VPSLLV : avx512_var_shift_types<0x47, "vpsllv", shl>,
-              avx512_var_shift_w<0x12, "vpsllvw", shl>;
+              avx512_var_shift_w<0x12, "vpsllvw", shl>,
+              avx512_var_shift_w_lowering<avx512vl_i16_info, shl>;
 defm VPSRAV : avx512_var_shift_types<0x46, "vpsrav", sra>,
-              avx512_var_shift_w<0x11, "vpsravw", sra>;
+              avx512_var_shift_w<0x11, "vpsravw", sra>,
+              avx512_var_shift_w_lowering<avx512vl_i16_info, sra>;
 defm VPSRLV : avx512_var_shift_types<0x45, "vpsrlv", srl>,
-              avx512_var_shift_w<0x10, "vpsrlvw", srl>;
+              avx512_var_shift_w<0x10, "vpsrlvw", srl>,
+              avx512_var_shift_w_lowering<avx512vl_i16_info, srl>;
 defm VPRORV : avx512_var_shift_types<0x14, "vprorv", rotr>;
 defm VPROLV : avx512_var_shift_types<0x15, "vprolv", rotl>;
 

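The avx512_var_shift_w_lowering patterns are what make the v8i16/v16i16 cases work on BWI targets without VLX: the 128/256-bit operands are inserted into zmm registers, shifted with the 512-bit instruction, and the low subregister is extracted. A minimal sketch, assuming the same knl+avx512bw configuration used in the updated tests:

; With -mcpu=knl -mattr=+avx512bw (BWI, no VLX) this is expected to
; select vpsravw on the implicitly widened zmm registers, matching the
; var_shift_v8i16 check lines in vector-shift-ashr-128.ll below.
define <8 x i16> @example_var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) {
  %shift = ashr <8 x i16> %a, %b
  ret <8 x i16> %shift
}
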
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Wed Dec 23 02:06:50 2015
@@ -4098,9 +4098,6 @@ defm PMULUDQ : PDI_binop_rm2<0xF4, "pmul
 //===---------------------------------------------------------------------===//
 
 let Predicates = [HasAVX, NoVLX] in {
-defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
-                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
-                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
 defm VPSLLD : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
                             VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
@@ -4108,9 +4105,6 @@ defm VPSLLQ : PDI_binop_rmi<0xF3, 0x73,
                             VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
 
-defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
-                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
-                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
 defm VPSRLD : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
                             VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
@@ -4118,13 +4112,23 @@ defm VPSRLQ : PDI_binop_rmi<0xD3, 0x73,
                             VR128, v2i64, v2i64, bc_v2i64, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
 
-defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
-                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
-                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
 defm VPSRAD : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
                             VR128, v4i32, v4i32, bc_v4i32, loadv2i64,
                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
-} // Predicates = [HasAVX]
+} // Predicates = [HasAVX, NoVLX]
+
+let Predicates = [HasAVX, NoVLX_Or_NoBWI] in {
+defm VPSLLW : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
+                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRLW : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
+                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+defm VPSRAW : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
+                            VR128, v8i16, v8i16, bc_v8i16, loadv2i64,
+                            SSE_INTSHIFT_ITINS_P, 0>, VEX_4V;
+} // Predicates = [HasAVX, NoVLX_Or_NoBWI]
+
 
 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift] ,
                                     Predicates = [HasAVX, NoVLX_Or_NoBWI]in {
@@ -4145,9 +4149,6 @@ let ExeDomain = SSEPackedInt, SchedRW =
 } // Predicates = [HasAVX, NoVLX_Or_NoBWI]
 
 let Predicates = [HasAVX2, NoVLX] in {
-defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
-                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
-                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
 defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli,
                              VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
@@ -4155,9 +4156,6 @@ defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73,
                              VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
 
-defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
-                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
-                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
 defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli,
                              VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
@@ -4165,13 +4163,22 @@ defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73,
                              VR256, v4i64, v2i64, bc_v2i64, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
 
-defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
-                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
-                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
 defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai,
                              VR256, v8i32, v4i32, bc_v4i32, loadv2i64,
                              SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
-}// Predicates = [HasAVX2]
+}// Predicates = [HasAVX2, NoVLX]
+
+let Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
+defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli,
+                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
+defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli,
+                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
+defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai,
+                             VR256, v16i16, v8i16, bc_v8i16, loadv2i64,
+                             SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L;
+}// Predicates = [HasAVX2, NoVLX_Or_NoBWI]
 
 let ExeDomain = SSEPackedInt, SchedRW = [WriteVecShift], hasSideEffects = 0 ,
                                     Predicates = [HasAVX2, NoVLX_Or_NoBWI] in {
@@ -8813,12 +8820,13 @@ multiclass avx2_var_shift<bits<8> opc, s
              VEX_4V, VEX_L, Sched<[WriteVarVecShiftLd, ReadAfterLd]>;
 }
 
-defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
-defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
-defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
-defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
-defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
-
+let Predicates = [HasAVX2, NoVLX] in {
+  defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>;
+  defm VPSLLVQ : avx2_var_shift<0x47, "vpsllvq", shl, v2i64, v4i64>, VEX_W;
+  defm VPSRLVD : avx2_var_shift<0x45, "vpsrlvd", srl, v4i32, v8i32>;
+  defm VPSRLVQ : avx2_var_shift<0x45, "vpsrlvq", srl, v2i64, v4i64>, VEX_W;
+  defm VPSRAVD : avx2_var_shift<0x46, "vpsravd", sra, v4i32, v8i32>;
+}
 //===----------------------------------------------------------------------===//
 // VGATHER - GATHER Operations
 multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,

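The predicate changes above matter because the EVEX-encoded xmm/ymm forms of the word shifts (vpsllw/vpsrlw/vpsraw) require both BWI and VLX, so the VEX patterns must stay enabled under NoVLX_Or_NoBWI rather than NoVLX alone; the dword/qword shifts and the AVX2 variable shifts only need VLX to be absent. The avx-isa-check.ll additions below verify that plain AVX/AVX2 targets still select only AVX/AVX2 instructions; an illustrative case, mirroring the committed tests rather than adding to them:

; On an AVX2-only target this must still match the VEX-encoded
; vpsllvd pattern now guarded by [HasAVX2, NoVLX].
define <8 x i32> @example_shl_v8i32(<8 x i32> %a, <8 x i32> %b) {
  %shift = shl <8 x i32> %a, %b
  ret <8 x i32> %shift
}
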
Modified: llvm/trunk/test/CodeGen/X86/avx-isa-check.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-isa-check.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-isa-check.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-isa-check.ll Wed Dec 23 02:06:50 2015
@@ -429,3 +429,142 @@ define <4 x double> @shuffle_v4f64_0022(
   ret <4 x double> %shuffle
 }
 
+define <8 x i32> @ashr_v8i32(<8 x i32> %a, <8 x i32> %b) {
+  %shift = ashr <8 x i32> %a, %b
+  ret <8 x i32> %shift
+}
+
+define <8 x i32> @lshr_v8i32(<8 x i32> %a, <8 x i32> %b) {
+  %shift = lshr <8 x i32> %a, %b
+  ret <8 x i32> %shift
+}
+
+define <8 x i32> @shl_v8i32(<8 x i32> %a, <8 x i32> %b) {
+  %shift = shl <8 x i32> %a, %b
+  ret <8 x i32> %shift
+}
+
+define <8 x i32> @ashr_const_v8i32(<8 x i32> %a) {
+  %shift = ashr <8 x i32> %a,  <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  ret <8 x i32> %shift
+}
+
+define <8 x i32> @lshr_const_v8i32(<8 x i32> %a) {
+  %shift = lshr <8 x i32> %a,  <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  ret <8 x i32> %shift
+}
+
+define <8 x i32> @shl_const_v8i32(<8 x i32> %a) {
+  %shift = shl <8 x i32> %a,  <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  ret <8 x i32> %shift
+}
+
+define <4 x i64> @ashr_v4i64(<4 x i64> %a, <4 x i64> %b) {
+  %shift = ashr <4 x i64> %a, %b
+  ret <4 x i64> %shift
+}
+
+define <4 x i64> @lshr_v4i64(<4 x i64> %a, <4 x i64> %b) {
+  %shift = lshr <4 x i64> %a, %b
+  ret <4 x i64> %shift
+}
+
+define <4 x i64> @shl_v4i64(<4 x i64> %a, <4 x i64> %b) {
+  %shift = shl <4 x i64> %a, %b
+  ret <4 x i64> %shift
+}
+
+define <4 x i64> @ashr_const_v4i64(<4 x i64> %a) {
+  %shift = ashr <4 x i64> %a,  <i64 3, i64 3, i64 3, i64 3>
+  ret <4 x i64> %shift
+}
+
+define <4 x i64> @lshr_const_v4i64(<4 x i64> %a) {
+  %shift = lshr <4 x i64> %a,  <i64 3, i64 3, i64 3, i64 3>
+  ret <4 x i64> %shift
+}
+
+define <4 x i64> @shl_const_v4i64(<4 x i64> %a) {
+  %shift = shl <4 x i64> %a,  <i64 3, i64 3, i64 3, i64 3>
+  ret <4 x i64> %shift
+}
+
+define <16 x i16> @ashr_v16i16(<16 x i16> %a, <16 x i16> %b) {
+  %shift = ashr <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @lshr_v16i16(<16 x i16> %a, <16 x i16> %b) {
+  %shift = lshr <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @shl_v16i16(<16 x i16> %a, <16 x i16> %b) {
+  %shift = shl <16 x i16> %a, %b
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @ashr_const_v16i16(<16 x i16> %a) {
+  %shift = ashr <16 x i16> %a,  <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @lshr_const_v16i16(<16 x i16> %a) {
+  %shift = lshr <16 x i16> %a,  <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <16 x i16> %shift
+}
+
+define <16 x i16> @shl_const_v16i16(<16 x i16> %a) {
+  %shift = shl <16 x i16> %a,  <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <16 x i16> %shift
+}
+
+define <4 x i32> @ashr_v4i32(<4 x i32> %a, <4 x i32> %b) {
+  %shift = ashr <4 x i32> %a, %b
+  ret <4 x i32> %shift
+}
+
+define <4 x i32> @shl_const_v4i32(<4 x i32> %a) {
+  %shift = shl <4 x i32> %a,  <i32 3, i32 3, i32 3, i32 3>
+  ret <4 x i32> %shift
+}
+
+define <2 x i64> @ashr_v2i64(<2 x i64> %a, <2 x i64> %b) {
+  %shift = ashr <2 x i64> %a, %b
+  ret <2 x i64> %shift
+}
+
+define <2 x i64> @shl_const_v2i64(<2 x i64> %a) {
+  %shift = shl <2 x i64> %a,  <i64 3, i64 3>
+  ret <2 x i64> %shift
+}
+
+define <8 x i16> @ashr_v8i16(<8 x i16> %a, <8 x i16> %b) {
+  %shift = ashr <8 x i16> %a, %b
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @lshr_v8i16(<8 x i16> %a, <8 x i16> %b) {
+  %shift = lshr <8 x i16> %a, %b
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @shl_v8i16(<8 x i16> %a, <8 x i16> %b) {
+  %shift = shl <8 x i16> %a, %b
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @ashr_const_v8i16(<8 x i16> %a) {
+  %shift = ashr <8 x i16> %a,<i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @lshr_const_v8i16(<8 x i16> %a) {
+  %shift = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <8 x i16> %shift
+}
+
+define <8 x i16> @shl_const_v8i16(<8 x i16> %a) {
+  %shift = shl <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
+  ret <8 x i16> %shift
+}

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll Wed Dec 23 02:06:50 2015
@@ -5,6 +5,8 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
+
 ;
 ; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
@@ -77,6 +79,15 @@ define <2 x i64> @var_shift_v2i64(<2 x i
 ; XOP-NEXT:    vpshaq %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX512-NEXT:    vpsrlvq %xmm1, %xmm2, %xmm3
+; AVX512-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsubq %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
@@ -177,6 +188,11 @@ define <4 x i32> @var_shift_v4i32(<4 x i
 ; XOPAVX2-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
@@ -305,6 +321,11 @@ define <8 x i16> @var_shift_v8i16(<8 x i
 ; XOP-NEXT:    vpshaw %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psllw $12, %xmm1
@@ -473,6 +494,34 @@ define <16 x i8> @var_shift_v16i8(<16 x
 ; XOP-NEXT:    vpshab %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512-NEXT:    vpsraw $4, %xmm3, %xmm4
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraw $2, %xmm3, %xmm4
+; AVX512-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraw $1, %xmm3, %xmm4
+; AVX512-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
+; AVX512-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512-NEXT:    vpsraw $4, %xmm0, %xmm3
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraw $2, %xmm0, %xmm3
+; AVX512-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraw $1, %xmm0, %xmm3
+; AVX512-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm0
+; AVX512-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
@@ -573,6 +622,15 @@ define <2 x i64> @splatvar_shift_v2i64(<
 ; XOPAVX2-NEXT:    vpshaq %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX512-NEXT:    vpsrlq %xmm1, %xmm2, %xmm2
+; AVX512-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movq {{.*#+}} xmm1 = xmm1[0],zero
@@ -616,6 +674,13 @@ define <4 x i32> @splatvar_shift_v4i32(<
 ; XOP-NEXT:    vpsrad %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT:    vpsrad %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    xorps %xmm2, %xmm2
@@ -657,6 +722,13 @@ define <8 x i16> @splatvar_shift_v8i16(<
 ; XOP-NEXT:    vpsraw %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX512-NEXT:    vpsraw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movd %xmm1, %eax
@@ -845,6 +917,35 @@ define <16 x i8> @splatvar_shift_v16i8(<
 ; XOPAVX2-NEXT:    vpshab %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512-NEXT:    vpsraw $4, %xmm3, %xmm4
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraw $2, %xmm3, %xmm4
+; AVX512-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraw $1, %xmm3, %xmm4
+; AVX512-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
+; AVX512-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512-NEXT:    vpsraw $4, %xmm0, %xmm3
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraw $2, %xmm0, %xmm3
+; AVX512-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraw $1, %xmm0, %xmm3
+; AVX512-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm0
+; AVX512-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -964,6 +1065,14 @@ define <2 x i64> @constant_shift_v2i64(<
 ; XOP-NEXT:    vpshaq %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [4611686018427387904,72057594037927936]
+; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
@@ -1040,6 +1149,11 @@ define <4 x i32> @constant_shift_v4i32(<
 ; XOPAVX2-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -1132,6 +1246,12 @@ define <8 x i16> @constant_shift_v8i16(<
 ; XOP-NEXT:    vpshaw %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -1285,6 +1405,35 @@ define <16 x i8> @constant_shift_v16i8(<
 ; XOP-NEXT:    vpshab %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512-NEXT:    vpsraw $4, %xmm3, %xmm4
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraw $2, %xmm3, %xmm4
+; AVX512-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
+; AVX512-NEXT:    vpsraw $1, %xmm3, %xmm4
+; AVX512-NEXT:    vpaddw %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
+; AVX512-NEXT:    vpsrlw $8, %xmm2, %xmm2
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512-NEXT:    vpsraw $4, %xmm0, %xmm3
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraw $2, %xmm0, %xmm3
+; AVX512-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vpsraw $1, %xmm0, %xmm3
+; AVX512-NEXT:    vpaddw %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $8, %xmm0, %xmm0
+; AVX512-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -1391,6 +1540,13 @@ define <2 x i64> @splatconstant_shift_v2
 ; XOP-NEXT:    vpshaq %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrad $7, %xmm0, %xmm1
+; AVX512-NEXT:    vpsrlq $7, %xmm0, %xmm0
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -1420,6 +1576,11 @@ define <4 x i32> @splatconstant_shift_v4
 ; XOP-NEXT:    vpsrad $5, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrad $5, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psrad $5, %xmm0
@@ -1444,6 +1605,11 @@ define <8 x i16> @splatconstant_shift_v8
 ; XOP-NEXT:    vpsraw $3, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsraw $3, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psraw $3, %xmm0
@@ -1478,6 +1644,15 @@ define <16 x i8> @splatconstant_shift_v1
 ; XOP-NEXT:    vpshab %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlw $3, %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psrlw $3, %xmm0

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll Wed Dec 23 02:06:50 2015
@@ -3,7 +3,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
-
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 ;
 ; Variable Shifts
 ;
@@ -64,6 +64,15 @@ define <4 x i64> @var_shift_v4i64(<4 x i
 ; XOPAVX2-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    vpsubq %ymm3, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX512-NEXT:    vpsrlvq %ymm1, %ymm2, %ymm3
+; AVX512-NEXT:    vpxor %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsubq %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = ashr <4 x i64> %a, %b
   ret <4 x i64> %shift
 }
@@ -120,6 +129,11 @@ define <8 x i32> @var_shift_v8i32(<8 x i
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsravd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = ashr <8 x i32> %a, %b
   ret <8 x i32> %shift
 }
@@ -197,6 +211,11 @@ define <16 x i16> @var_shift_v16i16(<16
 ; XOPAVX2-NEXT:    vpshaw %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
   %shift = ashr <16 x i16> %a, %b
   ret <16 x i16> %shift
 }
@@ -308,6 +327,34 @@ define <32 x i8> @var_shift_v32i8(<32 x
 ; XOPAVX2-NEXT:    vpshab %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512-NEXT:    vpsraw $4, %ymm3, %ymm4
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT:    vpsraw $2, %ymm3, %ymm4
+; AVX512-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT:    vpsraw $1, %ymm3, %ymm4
+; AVX512-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512-NEXT:    vpsraw $4, %ymm0, %ymm3
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpsraw $2, %ymm0, %ymm3
+; AVX512-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpsraw $1, %ymm0, %ymm3
+; AVX512-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = ashr <32 x i8> %a, %b
   ret <32 x i8> %shift
 }
@@ -359,6 +406,15 @@ define <4 x i64> @splatvar_shift_v4i64(<
 ; XOPAVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm2
+; AVX512-NEXT:    vpsrlq %xmm1, %ymm2, %ymm2
+; AVX512-NEXT:    vpsrlq %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpxor %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
   %shift = ashr <4 x i64> %a, %splat
   ret <4 x i64> %shift
@@ -398,6 +454,13 @@ define <8 x i32> @splatvar_shift_v8i32(<
 ; XOPAVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
 ; XOPAVX2-NEXT:    vpsrad %xmm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT:    vpsrad %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
   %shift = ashr <8 x i32> %a, %splat
   ret <8 x i32> %shift
@@ -441,6 +504,14 @@ define <16 x i16> @splatvar_shift_v16i16
 ; XOPAVX2-NEXT:    vmovd %eax, %xmm1
 ; XOPAVX2-NEXT:    vpsraw %xmm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovd %xmm1, %eax
+; AVX512-NEXT:    movzwl %ax, %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpsraw %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
   %shift = ashr <16 x i16> %a, %splat
   ret <16 x i16> %shift
@@ -548,6 +619,35 @@ define <32 x i8> @splatvar_shift_v32i8(<
 ; XOPAVX2-NEXT:    vpshab %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512-NEXT:    vpsraw $4, %ymm3, %ymm4
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT:    vpsraw $2, %ymm3, %ymm4
+; AVX512-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT:    vpsraw $1, %ymm3, %ymm4
+; AVX512-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512-NEXT:    vpsraw $4, %ymm0, %ymm3
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpsraw $2, %ymm0, %ymm3
+; AVX512-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpsraw $1, %ymm0, %ymm3
+; AVX512-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
   %shift = ashr <32 x i8> %a, %splat
   ret <32 x i8> %shift
@@ -602,6 +702,14 @@ define <4 x i64> @constant_shift_v4i64(<
 ; XOPAVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [4611686018427387904,72057594037927936,4294967296,2]
+; AVX512-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = ashr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
   ret <4 x i64> %shift
 }
@@ -642,6 +750,11 @@ define <8 x i32> @constant_shift_v8i32(<
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsravd {{.*}}(%rip), %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsravd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = ashr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
   ret <8 x i32> %shift
 }
@@ -713,6 +826,12 @@ define <16 x i16> @constant_shift_v16i16
 ; XOPAVX2-NEXT:    vpshaw %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
   %shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
   ret <16 x i16> %shift
 }
@@ -814,6 +933,35 @@ define <32 x i8> @constant_shift_v32i8(<
 ; XOPAVX2-NEXT:    vpshab %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512-NEXT:    vpsraw $4, %ymm3, %ymm4
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT:    vpsraw $2, %ymm3, %ymm4
+; AVX512-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512-NEXT:    vpsraw $1, %ymm3, %ymm4
+; AVX512-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512-NEXT:    vpsrlw $8, %ymm2, %ymm2
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512-NEXT:    vpsraw $4, %ymm0, %ymm3
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpsraw $2, %ymm0, %ymm3
+; AVX512-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpsraw $1, %ymm0, %ymm3
+; AVX512-NEXT:    vpaddw %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = ashr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
   ret <32 x i8> %shift
 }
@@ -859,6 +1007,13 @@ define <4 x i64> @splatconstant_shift_v4
 ; XOPAVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrad $7, %ymm0, %ymm1
+; AVX512-NEXT:    vpsrlq $7, %ymm0, %ymm0
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512-NEXT:    retq
   %shift = ashr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
   ret <4 x i64> %shift
 }
@@ -889,6 +1044,11 @@ define <8 x i32> @splatconstant_shift_v8
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsrad $5, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrad $5, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = ashr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <8 x i32> %shift
 }
@@ -919,6 +1079,11 @@ define <16 x i16> @splatconstant_shift_v
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsraw $3, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsraw $3, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
   ret <16 x i16> %shift
 }
@@ -967,6 +1132,15 @@ define <32 x i8> @splatconstant_shift_v3
 ; XOPAVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlw $3, %ymm0, %ymm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = ashr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <32 x i8> %shift
 }

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-512.ll Wed Dec 23 02:06:50 2015
@@ -1,7 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; TODO: Add AVX512BW shift support
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
-
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 ;
 ; Variable Shifts
 ;
@@ -25,84 +24,89 @@ define <16 x i32> @var_shift_v16i32(<16
 }
 
 define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: var_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsravd %ymm5, %ymm6, %ymm5
-; ALL-NEXT:    vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsravd %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsravd %ymm2, %ymm5, %ymm2
-; ALL-NEXT:    vpsrld $16, %ymm2, %ymm2
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsravd %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT:    vpackusdw %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: var_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsravd %ymm5, %ymm6, %ymm5
+; AVX512DQ-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsravd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsravd %ymm2, %ymm5, %ymm2
+; AVX512DQ-NEXT:    vpsrld $16, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsravd %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: var_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = ashr <32 x i16> %a, %b
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: var_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; ALL-NEXT:    vpsraw $4, %ymm5, %ymm6
-; ALL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
-; ALL-NEXT:    vpsraw $2, %ymm5, %ymm6
-; ALL-NEXT:    vpaddw %ymm4, %ymm4, %ymm4
-; ALL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
-; ALL-NEXT:    vpsraw $1, %ymm5, %ymm6
-; ALL-NEXT:    vpaddw %ymm4, %ymm4, %ymm4
-; ALL-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm4
-; ALL-NEXT:    vpsrlw $8, %ymm4, %ymm4
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; ALL-NEXT:    vpsraw $4, %ymm0, %ymm5
-; ALL-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpsraw $2, %ymm0, %ymm5
-; ALL-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpsraw $1, %ymm0, %ymm5
-; ALL-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $8, %ymm0, %ymm0
-; ALL-NEXT:    vpackuswb %ymm4, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw $5, %ymm3, %ymm2
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; ALL-NEXT:    vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsraw $2, %ymm4, %ymm5
-; ALL-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsraw $1, %ymm4, %ymm5
-; ALL-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
-; ALL-NEXT:    vpsrlw $8, %ymm3, %ymm3
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; ALL-NEXT:    vpsraw $4, %ymm1, %ymm4
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT:    vpsraw $2, %ymm1, %ymm4
-; ALL-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT:    vpsraw $1, %ymm1, %ymm4
-; ALL-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT:    vpsrlw $8, %ymm1, %ymm1
-; ALL-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: var_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm5, %ymm6
+; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vpsraw $2, %ymm5, %ymm6
+; AVX512DQ-NEXT:    vpaddw %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vpsraw $1, %ymm5, %ymm6
+; AVX512DQ-NEXT:    vpaddw %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm6, %ymm5, %ymm4
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsraw $2, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsraw $1, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpackuswb %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw $5, %ymm3, %ymm2
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsraw $2, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsraw $1, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm1, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsraw $2, %ymm1, %ymm4
+; AVX512DQ-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsraw $1, %ymm1, %ymm4
+; AVX512DQ-NEXT:    vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
   %shift = ashr <64 x i8> %a, %b
   ret <64 x i8> %shift
 }
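
A note on the pattern above: x86 has no packed arithmetic byte shift, so the v64i8 ashr is done at word granularity. Interleaving the source with itself (the ymm0[8,8,9,9,...] unpacks) puts each byte in both halves of a 16-bit lane; a vpsraw then leaves the correctly sign-extended result in the high byte, and vpsrlw $8 plus vpackuswb collect it. The v32i16 code later in this file plays the same trick one size up with vpsravd/vpsrld $16/vpackusdw. A minimal scalar sketch of one lane (the function name is illustrative, not from the patch, and the reference check assumes >> on a negative signed value is an arithmetic shift, as on all common compilers):

#include <cassert>
#include <cstdint>

// One byte lane of the unpack / vpsraw / vpsrlw $8 sequence.
static int8_t ashr_via_word(uint8_t b, unsigned s) {          // s in [0, 7]
    uint16_t w = static_cast<uint16_t>((b << 8) | b);         // punpck b with itself
    int16_t shifted =
        static_cast<int16_t>(static_cast<int16_t>(w) >> s);   // vpsraw
    return static_cast<int8_t>(static_cast<uint16_t>(shifted) >> 8); // vpsrlw $8
}

int main() {
    for (int b = 0; b < 256; ++b)
        for (unsigned s = 0; s < 8; ++s)
            assert(ashr_via_word(static_cast<uint8_t>(b), s) ==
                   static_cast<int8_t>(static_cast<int8_t>(b) >> s));
}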
@@ -134,65 +138,73 @@ define <16 x i32> @splatvar_shift_v16i32
 }
 
 define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vmovd %xmm2, %eax
-; ALL-NEXT:    movzwl %ax, %eax
-; ALL-NEXT:    vmovd %eax, %xmm2
-; ALL-NEXT:    vpsraw %xmm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsraw %xmm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatvar_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vmovd %xmm2, %eax
+; AVX512DQ-NEXT:    movzwl %ax, %eax
+; AVX512DQ-NEXT:    vmovd %eax, %xmm2
+; AVX512DQ-NEXT:    vpsraw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsraw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vmovd %xmm1, %eax
+; AVX512BW-NEXT:    movzwl %ax, %eax
+; AVX512BW-NEXT:    vmovd %eax, %xmm1
+; AVX512BW-NEXT:    vpsraw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
   %shift = ashr <32 x i16> %a, %splat
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpbroadcastb %xmm2, %ymm2
-; ALL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; ALL-NEXT:    vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsraw $2, %ymm4, %ymm5
-; ALL-NEXT:    vpaddw %ymm3, %ymm3, %ymm6
-; ALL-NEXT:    vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsraw $1, %ymm4, %ymm5
-; ALL-NEXT:    vpaddw %ymm6, %ymm6, %ymm7
-; ALL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsrlw $8, %ymm4, %ymm4
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; ALL-NEXT:    vpsraw $4, %ymm0, %ymm5
-; ALL-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpsraw $2, %ymm0, %ymm5
-; ALL-NEXT:    vpaddw %ymm2, %ymm2, %ymm8
-; ALL-NEXT:    vpblendvb %ymm8, %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpsraw $1, %ymm0, %ymm5
-; ALL-NEXT:    vpaddw %ymm8, %ymm8, %ymm9
-; ALL-NEXT:    vpblendvb %ymm9, %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $8, %ymm0, %ymm0
-; ALL-NEXT:    vpackuswb %ymm4, %ymm0, %ymm0
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; ALL-NEXT:    vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
-; ALL-NEXT:    vpsraw $2, %ymm3, %ymm4
-; ALL-NEXT:    vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpsraw $1, %ymm3, %ymm4
-; ALL-NEXT:    vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpsrlw $8, %ymm3, %ymm3
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; ALL-NEXT:    vpsraw $4, %ymm1, %ymm4
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT:    vpsraw $2, %ymm1, %ymm2
-; ALL-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsraw $1, %ymm1, %ymm2
-; ALL-NEXT:    vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsrlw $8, %ymm1, %ymm1
-; ALL-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatvar_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsraw $2, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm6
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsraw $1, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsraw $2, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm2, %ymm2, %ymm8
+; AVX512DQ-NEXT:    vpblendvb %ymm8, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsraw $1, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm8, %ymm8, %ymm9
+; AVX512DQ-NEXT:    vpblendvb %ymm9, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpackuswb %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512DQ-NEXT:    vpsraw $2, %ymm3, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpsraw $1, %ymm3, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm1, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsraw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsraw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
   %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
   %shift = ashr <64 x i8> %a, %splat
   ret <64 x i8> %shift
@@ -221,77 +233,82 @@ define <16 x i32> @constant_shift_v16i32
 }
 
 define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: constant_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsravd %ymm4, %ymm5, %ymm5
-; ALL-NEXT:    vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsravd %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsravd %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpsrld $16, %ymm3, %ymm3
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsravd %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT:    vpackusdw %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: constant_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsravd %ymm4, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsravd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsravd %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpsrld $16, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsravd %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpackusdw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsravw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = ashr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: constant_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; ALL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; ALL-NEXT:    vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsraw $2, %ymm4, %ymm5
-; ALL-NEXT:    vpaddw %ymm3, %ymm3, %ymm6
-; ALL-NEXT:    vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsraw $1, %ymm4, %ymm5
-; ALL-NEXT:    vpaddw %ymm6, %ymm6, %ymm7
-; ALL-NEXT:    vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsrlw $8, %ymm4, %ymm4
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; ALL-NEXT:    vpsraw $4, %ymm0, %ymm5
-; ALL-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpsraw $2, %ymm0, %ymm5
-; ALL-NEXT:    vpaddw %ymm2, %ymm2, %ymm8
-; ALL-NEXT:    vpblendvb %ymm8, %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpsraw $1, %ymm0, %ymm5
-; ALL-NEXT:    vpaddw %ymm8, %ymm8, %ymm9
-; ALL-NEXT:    vpblendvb %ymm9, %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $8, %ymm0, %ymm0
-; ALL-NEXT:    vpackuswb %ymm4, %ymm0, %ymm0
-; ALL-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; ALL-NEXT:    vpsraw $4, %ymm4, %ymm5
-; ALL-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
-; ALL-NEXT:    vpsraw $2, %ymm3, %ymm4
-; ALL-NEXT:    vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpsraw $1, %ymm3, %ymm4
-; ALL-NEXT:    vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpsrlw $8, %ymm3, %ymm3
-; ALL-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; ALL-NEXT:    vpsraw $4, %ymm1, %ymm4
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
-; ALL-NEXT:    vpsraw $2, %ymm1, %ymm2
-; ALL-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsraw $1, %ymm1, %ymm2
-; ALL-NEXT:    vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsrlw $8, %ymm1, %ymm1
-; ALL-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: constant_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsraw $2, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm3, %ymm3, %ymm6
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsraw $1, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsraw $2, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm2, %ymm2, %ymm8
+; AVX512DQ-NEXT:    vpblendvb %ymm8, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsraw $1, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpaddw %ymm8, %ymm8, %ymm9
+; AVX512DQ-NEXT:    vpblendvb %ymm9, %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpackuswb %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm4, %ymm5
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm5, %ymm4, %ymm3
+; AVX512DQ-NEXT:    vpsraw $2, %ymm3, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpsraw $1, %ymm3, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-NEXT:    vpsraw $4, %ymm1, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsraw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsraw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm9, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
   %shift = ashr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
   ret <64 x i8> %shift
 }
@@ -319,29 +336,43 @@ define <16 x i32> @splatconstant_shift_v
 }
 
 define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsraw $3, %ymm0, %ymm0
-; ALL-NEXT:    vpsraw $3, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatconstant_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsraw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsraw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsraw $3, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = ashr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsrlw $3, %ymm0, %ymm0
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; ALL-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
-; ALL-NEXT:    vpxor %ymm3, %ymm0, %ymm0
-; ALL-NEXT:    vpsubb %ymm3, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $3, %ymm1, %ymm1
-; ALL-NEXT:    vpand %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpxor %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatconstant_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsrlw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512DQ-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512DQ-NEXT:    vpxor %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsubb %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpxor %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v64i8:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsrlw $3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu8 {{.*#+}} zmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; AVX512BW-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsubb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <64 x i8> %shift
 }
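
The splatconstant v64i8 case shows the companion trick for a constant amount: shift logically at word width, mask each lane to 0xff >> s (the 31s for s == 3), then fix the sign with an xor/sub against the shifted sign bit 0x80 >> s (the 16s). A scalar sketch of one lane, under the same assumptions as above (illustrative name, arithmetic >> on signed values in the reference check):

#include <cassert>
#include <cstdint>

// One byte lane of the vpsrlw $3 / vpand 31 / vpxor 16 / vpsubb 16 sequence.
static int8_t ashr_byte(uint8_t x, unsigned s) {               // s in [0, 7]
    uint8_t sign = static_cast<uint8_t>(0x80u >> s);           // shifted sign bit
    uint8_t y = static_cast<uint8_t>((x >> s) & (0xFFu >> s)); // vpsrlw + vpand
    return static_cast<int8_t>((y ^ sign) - sign);             // vpxor + vpsubb
}

int main() {
    for (int x = 0; x < 256; ++x)
        for (unsigned s = 0; s < 8; ++s)
            assert(ashr_byte(static_cast<uint8_t>(x), s) ==
                   static_cast<int8_t>(static_cast<int8_t>(x) >> s));
}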

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-128.ll Wed Dec 23 02:06:50 2015
@@ -5,6 +5,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 ;
 ; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
@@ -58,6 +59,11 @@ define <2 x i64> @var_shift_v2i64(<2 x i
 ; XOPAVX2-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
@@ -151,6 +157,11 @@ define <4 x i32> @var_shift_v4i32(<4 x i
 ; XOPAVX2-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm1, %xmm2
@@ -279,6 +290,11 @@ define <8 x i16> @var_shift_v8i16(<8 x i
 ; XOP-NEXT:    vpshlw %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psllw $12, %xmm1
@@ -396,6 +412,22 @@ define <16 x i8> @var_shift_v16i8(<16 x
 ; XOP-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT:    vpsrlw $4, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $2, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $1, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psllw $5, %xmm1
@@ -450,6 +482,11 @@ define <2 x i64> @splatvar_shift_v2i64(<
 ; XOP-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movq {{.*#+}} xmm1 = xmm1[0],zero
@@ -489,6 +526,13 @@ define <4 x i32> @splatvar_shift_v4i32(<
 ; XOP-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    xorps %xmm2, %xmm2
@@ -530,6 +574,13 @@ define <8 x i16> @splatvar_shift_v8i16(<
 ; XOP-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX512-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movd %xmm1, %eax
@@ -656,6 +707,23 @@ define <16 x i8> @splatvar_shift_v16i8(<
 ; XOPAVX2-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT:    vpsrlw $4, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $2, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $1, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -741,6 +809,11 @@ define <2 x i64> @constant_shift_v2i64(<
 ; XOPAVX2-NEXT:    vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -810,6 +883,11 @@ define <4 x i32> @constant_shift_v4i32(<
 ; XOPAVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -902,6 +980,12 @@ define <8 x i16> @constant_shift_v8i16(<
 ; XOP-NEXT:    vpshlw %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -1001,6 +1085,23 @@ define <16 x i8> @constant_shift_v16i8(<
 ; XOP-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT:    vpsrlw $4, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $2, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsrlw $1, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
@@ -1056,6 +1157,11 @@ define <2 x i64> @splatconstant_shift_v2
 ; XOP-NEXT:    vpsrlq $7, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlq $7, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psrlq $7, %xmm0
@@ -1080,6 +1186,11 @@ define <4 x i32> @splatconstant_shift_v4
 ; XOP-NEXT:    vpsrld $5, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrld $5, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psrld $5, %xmm0
@@ -1104,6 +1215,11 @@ define <8 x i16> @splatconstant_shift_v8
 ; XOP-NEXT:    vpsrlw $3, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlw $3, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psrlw $3, %xmm0
@@ -1132,6 +1248,12 @@ define <16 x i8> @splatconstant_shift_v1
 ; XOP-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlw $3, %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psrlw $3, %xmm0
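
All of the variable v16i8 sequences added here share the vpsllw $5 + vpblendvb ladder: vpblendvb selects on the top bit of each control byte, so the shift amount is pre-shifted left by 5 to put bit 2 in the sign position, and vpaddb doubles the control between rounds to walk bits 2, 1 and 0 through it while conditionally shifting by 4, 2 and 1. A scalar sketch of one lane (function name illustrative; the vpand after each vpsrlw in the vector code, which clears bits leaking in from the neighbouring lane, is unnecessary in a true 8-bit shift):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// One byte lane of the vpsllw $5 / vpblendvb / vpaddb ladder.
static uint8_t lshr_ladder(uint8_t x, uint8_t amt) {      // amt in [0, 7]
    uint8_t c = static_cast<uint8_t>(amt << 5);           // bit 2 -> sign bit
    for (int step : {4, 2, 1}) {
        if (c & 0x80)                                     // vpblendvb on the sign bit
            x = static_cast<uint8_t>(x >> step);          // vpsrlw + vpand
        c = static_cast<uint8_t>(c << 1);                 // vpaddb c, c
    }
    return x;
}

int main() {
    for (int x = 0; x < 256; ++x)
        for (int a = 0; a < 8; ++a)
            assert(lshr_ladder(static_cast<uint8_t>(x), static_cast<uint8_t>(a)) ==
                   static_cast<uint8_t>(x >> a));
}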

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-256.ll Wed Dec 23 02:06:50 2015
@@ -3,7 +3,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
-
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 ;
 ; Variable Shifts
 ;
@@ -45,6 +45,11 @@ define <4 x i64> @var_shift_v4i64(<4 x i
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <4 x i64> %a, %b
   ret <4 x i64> %shift
 }
@@ -101,6 +106,11 @@ define <8 x i32> @var_shift_v8i32(<8 x i
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <8 x i32> %a, %b
   ret <8 x i32> %shift
 }
@@ -178,6 +188,11 @@ define <16 x i16> @var_shift_v16i16(<16
 ; XOPAVX2-NEXT:    vpshlw %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
   %shift = lshr <16 x i16> %a, %b
   ret <16 x i16> %shift
 }
@@ -256,6 +271,22 @@ define <32 x i8> @var_shift_v32i8(<32 x
 ; XOPAVX2-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $2, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $1, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <32 x i8> %a, %b
   ret <32 x i8> %shift
 }
@@ -290,6 +321,11 @@ define <4 x i64> @splatvar_shift_v4i64(<
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsrlq %xmm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlq %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
   %shift = lshr <4 x i64> %a, %splat
   ret <4 x i64> %shift
@@ -329,6 +365,13 @@ define <8 x i32> @splatvar_shift_v8i32(<
 ; XOPAVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
 ; XOPAVX2-NEXT:    vpsrld %xmm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT:    vpsrld %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
   %shift = lshr <8 x i32> %a, %splat
   ret <8 x i32> %shift
@@ -372,6 +415,14 @@ define <16 x i16> @splatvar_shift_v16i16
 ; XOPAVX2-NEXT:    vmovd %eax, %xmm1
 ; XOPAVX2-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovd %xmm1, %eax
+; AVX512-NEXT:    movzwl %ax, %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpsrlw %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
   %shift = lshr <16 x i16> %a, %splat
   ret <16 x i16> %shift
@@ -450,6 +501,23 @@ define <32 x i8> @splatvar_shift_v32i8(<
 ; XOPAVX2-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $2, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $1, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
   %shift = lshr <32 x i8> %a, %splat
   ret <32 x i8> %shift
@@ -492,6 +560,11 @@ define <4 x i64> @constant_shift_v4i64(<
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
   ret <4 x i64> %shift
 }
@@ -532,6 +605,11 @@ define <8 x i32> @constant_shift_v8i32(<
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
   ret <8 x i32> %shift
 }
@@ -603,6 +681,12 @@ define <16 x i16> @constant_shift_v16i16
 ; XOPAVX2-NEXT:    vpshlw %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
   %shift = lshr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
   ret <16 x i16> %shift
 }
@@ -675,6 +759,23 @@ define <32 x i8> @constant_shift_v32i8(<
 ; XOPAVX2-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $2, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrlw $1, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
   ret <32 x i8> %shift
 }
@@ -709,6 +810,11 @@ define <4 x i64> @splatconstant_shift_v4
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsrlq $7, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlq $7, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
   ret <4 x i64> %shift
 }
@@ -739,6 +845,11 @@ define <8 x i32> @splatconstant_shift_v8
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsrld $5, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrld $5, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <8 x i32> %shift
 }
@@ -769,6 +880,11 @@ define <16 x i16> @splatconstant_shift_v
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsrlw $3, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlw $3, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
   ret <16 x i16> %shift
 }
@@ -806,6 +922,12 @@ define <32 x i8> @splatconstant_shift_v3
 ; XOPAVX2-NEXT:    vpsrlw $3, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsrlw $3, %ymm0, %ymm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <32 x i8> %shift
 }
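
The 15/63/127 constants throughout these lshr tests come from the same lane-masking rule: shifting bytes with the 16-bit shifter lets the neighbouring byte's bits leak into the top of each lane, so every vpsrlw by s is followed by a vpand with the per-byte mask 0xff >> s (31 in the splat shift-by-3 case). A sketch over a single 16-bit word holding two lanes (function name illustrative):

#include <cassert>
#include <cstdint>

// Two byte lanes of the vpsrlw $s / vpand (0xff >> s) pairing.
static uint16_t lshr_bytes_in_word(uint16_t w, unsigned s) {       // s in [0, 7]
    uint16_t mask = static_cast<uint16_t>((0xFFu >> s) * 0x0101u); // 0xff >> s per byte
    return static_cast<uint16_t>((w >> s) & mask);                 // vpsrlw + vpand
}

int main() {
    for (unsigned hi = 0; hi < 256; ++hi)
        for (unsigned lo = 0; lo < 256; ++lo)
            for (unsigned s = 0; s < 8; ++s) {
                uint16_t w = static_cast<uint16_t>((hi << 8) | lo);
                uint16_t want =
                    static_cast<uint16_t>(((hi >> s) << 8) | (lo >> s));
                assert(lshr_bytes_in_word(w, s) == want);
            }
}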

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-lshr-512.ll Wed Dec 23 02:06:50 2015
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; TODO: Add AVX512BW shift support
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 
 ;
 ; Variable Shifts
@@ -25,63 +25,69 @@ define <16 x i32> @var_shift_v16i32(<16
 }
 
 define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: var_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsrlvd %ymm5, %ymm6, %ymm5
-; ALL-NEXT:    vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsrlvd %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsrlvd %ymm2, %ymm5, %ymm2
-; ALL-NEXT:    vpsrld $16, %ymm2, %ymm2
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsrlvd %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT:    vpackusdw %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: var_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsrlvd %ymm5, %ymm6, %ymm5
+; AVX512DQ-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsrlvd %ymm2, %ymm5, %ymm2
+; AVX512DQ-NEXT:    vpsrld $16, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsrlvd %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: var_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = lshr <32 x i16> %a, %b
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: var_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsrlw $4, %ymm0, %ymm4
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; ALL-NEXT:    vpand %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $2, %ymm0, %ymm4
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; ALL-NEXT:    vpand %ymm6, %ymm4, %ymm4
-; ALL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $1, %ymm0, %ymm4
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; ALL-NEXT:    vpand %ymm7, %ymm4, %ymm4
-; ALL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $4, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT:    vpsllw $5, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsrlw $2, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm6, %ymm2, %ymm2
-; ALL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsrlw $1, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: var_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsrlw $4, %ymm0, %ymm4
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $2, %ymm0, %ymm4
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512DQ-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $1, %ymm0, %ymm4
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512DQ-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpsllw $5, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrlw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrlw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+
   %shift = lshr <64 x i8> %a, %b
   ret <64 x i8> %shift
 }
@@ -113,48 +119,56 @@ define <16 x i32> @splatvar_shift_v16i32
 }
 
 define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vmovd %xmm2, %eax
-; ALL-NEXT:    movzwl %ax, %eax
-; ALL-NEXT:    vmovd %eax, %xmm2
-; ALL-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatvar_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vmovd %xmm2, %eax
+; AVX512DQ-NEXT:    movzwl %ax, %eax
+; AVX512DQ-NEXT:    vmovd %eax, %xmm2
+; AVX512DQ-NEXT:    vpsrlw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vmovd %xmm1, %eax
+; AVX512BW-NEXT:    movzwl %ax, %eax
+; AVX512BW-NEXT:    vmovd %eax, %xmm1
+; AVX512BW-NEXT:    vpsrlw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
   %shift = lshr <32 x i16> %a, %splat
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpbroadcastb %xmm2, %ymm2
-; ALL-NEXT:    vpsrlw $4, %ymm0, %ymm3
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; ALL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $2, %ymm0, %ymm3
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; ALL-NEXT:    vpand %ymm5, %ymm3, %ymm3
-; ALL-NEXT:    vpaddb %ymm2, %ymm2, %ymm6
-; ALL-NEXT:    vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $1, %ymm0, %ymm3
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; ALL-NEXT:    vpand %ymm7, %ymm3, %ymm3
-; ALL-NEXT:    vpaddb %ymm6, %ymm6, %ymm8
-; ALL-NEXT:    vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $4, %ymm1, %ymm3
-; ALL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    vpsrlw $2, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsrlw $1, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatvar_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512DQ-NEXT:    vpsrlw $4, %ymm0, %ymm3
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $2, %ymm0, %ymm3
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm6
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $1, %ymm0, %ymm3
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512DQ-NEXT:    vpand %ymm7, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpaddb %ymm6, %ymm6, %ymm8
+; AVX512DQ-NEXT:    vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $4, %ymm1, %ymm3
+; AVX512DQ-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrlw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrlw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
   %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
   %shift = lshr <64 x i8> %a, %splat
   ret <64 x i8> %shift
@@ -183,60 +197,65 @@ define <16 x i32> @constant_shift_v16i32
 }
 
 define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: constant_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpxor %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsrlvd %ymm4, %ymm5, %ymm5
-; ALL-NEXT:    vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsrlvd %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsrlvd %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpsrld $16, %ymm3, %ymm3
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsrlvd %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT:    vpackusdw %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: constant_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsrlvd %ymm4, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsrlvd %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpsrld $16, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsrlvd %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpackusdw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = lshr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: constant_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsrlw $4, %ymm0, %ymm2
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; ALL-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; ALL-NEXT:    vpsllw $5, %ymm4, %ymm4
-; ALL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $2, %ymm0, %ymm2
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; ALL-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT:    vpaddb %ymm4, %ymm4, %ymm6
-; ALL-NEXT:    vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $1, %ymm0, %ymm2
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; ALL-NEXT:    vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT:    vpaddb %ymm6, %ymm6, %ymm8
-; ALL-NEXT:    vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $4, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsrlw $2, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsrlw $1, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm7, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: constant_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsrlw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512DQ-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm4, %ymm4, %ymm6
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $1, %ymm0, %ymm2
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512DQ-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm6, %ymm6, %ymm8
+; AVX512DQ-NEXT:    vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrlw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrlw $1, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
   %shift = lshr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
   ret <64 x i8> %shift
 }
@@ -264,24 +283,35 @@ define <16 x i32> @splatconstant_shift_v
 }
 
 define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsrlw $3, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $3, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatconstant_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsrlw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsrlw $3, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = lshr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsrlw $3, %ymm0, %ymm0
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; ALL-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrlw $3, %ymm1, %ymm1
-; ALL-NEXT:    vpand %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatconstant_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsrlw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
+; AVX512DQ-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrlw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v64i8:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsrlw $3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = lshr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <64 x i8> %shift
 }
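
The new AVX512BW checks for splatconstant_shift_v64i8 above show the same trick the 128/256-bit byte shifts already rely on: there is no per-byte shift instruction, so the lowering shifts whole 16-bit lanes (vpsrlw $3) and then masks each byte with 0xFF >> 3 = 31 (the vpand splat constant in the AVX512DQ checks, a vpandq rip-relative load in the AVX512BW checks). A minimal scalar sketch of the identity, written for this note and not taken from the patch:

    #include <cassert>
    #include <cstdint>

    // Per-byte logical right shift by an immediate k, done the
    // vpsrlw+vpand(q) way: shift the whole 16-bit lane, then clear the top
    // k bits of each byte, which is exactly where the neighbouring byte's
    // bits leaked in.
    uint16_t lshr_bytes_via_word(uint16_t lane, unsigned k) {
      uint16_t shifted = static_cast<uint16_t>(lane >> k);           // vpsrlw $k
      uint16_t mask = static_cast<uint16_t>(0x0101u * (0xFFu >> k)); // splat of (0xFF >> k), 31 for k=3
      return static_cast<uint16_t>(shifted & mask);                  // vpand / vpandq
    }

    int main() {
      for (unsigned lane = 0; lane <= 0xFFFF; ++lane)
        for (unsigned k = 0; k < 8; ++k) {
          unsigned lo = (lane & 0xFFu) >> k;        // per-byte reference result
          unsigned hi = ((lane >> 8) & 0xFFu) >> k;
          assert(lshr_bytes_via_word(static_cast<uint16_t>(lane), k) ==
                 static_cast<uint16_t>((hi << 8) | lo));
        }
      return 0;
    }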

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-128.ll Wed Dec 23 02:06:50 2015
@@ -5,6 +5,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 ;
 ; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
@@ -56,6 +57,11 @@ define <2 x i64> @var_shift_v2i64(<2 x i
 ; XOPAVX2-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
@@ -117,6 +123,11 @@ define <4 x i32> @var_shift_v4i32(<4 x i
 ; XOPAVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    pslld $23, %xmm1
@@ -234,6 +245,11 @@ define <8 x i16> @var_shift_v8i16(<8 x i
 ; XOP-NEXT:    vpshlw %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psllw $12, %xmm1
@@ -346,6 +362,21 @@ define <16 x i8> @var_shift_v16i8(<16 x
 ; XOP-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: var_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $4, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsllw $2, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: var_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psllw $5, %xmm1
@@ -399,6 +430,11 @@ define <2 x i64> @splatvar_shift_v2i64(<
 ; XOP-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movq {{.*#+}} xmm1 = xmm1[0],zero
@@ -438,6 +474,13 @@ define <4 x i32> @splatvar_shift_v4i32(<
 ; XOP-NEXT:    vpslld %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT:    vpslld %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    xorps %xmm2, %xmm2
@@ -479,6 +522,13 @@ define <8 x i16> @splatvar_shift_v8i16(<
 ; XOP-NEXT:    vpsllw %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; AVX512-NEXT:    vpsllw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movd %xmm1, %eax
@@ -598,6 +648,22 @@ define <16 x i8> @splatvar_shift_v16i8(<
 ; XOPAVX2-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: splatvar_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpbroadcastb %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $4, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsllw $2, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatvar_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -680,6 +746,11 @@ define <2 x i64> @constant_shift_v2i64(<
 ; XOPAVX2-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa %xmm0, %xmm1
@@ -730,6 +801,11 @@ define <4 x i32> @constant_shift_v4i32(<
 ; XOPAVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
 ; XOPAVX2-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [16,32,64,128]
@@ -761,6 +837,12 @@ define <8 x i16> @constant_shift_v8i16(<
 ; XOP-NEXT:    vpshlw {{.*}}(%rip), %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    pmullw .LCPI10_0, %xmm0
@@ -843,6 +925,22 @@ define <16 x i8> @constant_shift_v16i8(<
 ; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: constant_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT:    vpsllw $5, %xmm1, %xmm1
+; AVX512-NEXT:    vpsllw $4, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpsllw $2, %xmm0, %xmm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpaddb %xmm0, %xmm0, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: constant_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
@@ -897,6 +995,11 @@ define <2 x i64> @splatconstant_shift_v2
 ; XOP-NEXT:    vpsllq $7, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v2i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllq $7, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v2i64:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psllq $7, %xmm0
@@ -921,6 +1024,11 @@ define <4 x i32> @splatconstant_shift_v4
 ; XOP-NEXT:    vpslld $5, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v4i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpslld $5, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v4i32:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    pslld $5, %xmm0
@@ -945,6 +1053,11 @@ define <8 x i16> @splatconstant_shift_v8
 ; XOP-NEXT:    vpsllw $3, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v8i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $3, %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v8i16:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psllw $3, %xmm0
@@ -971,6 +1084,12 @@ define <16 x i8> @splatconstant_shift_v1
 ; XOP-NEXT:    vpshlb {{.*}}(%rip), %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
+; AVX512-LABEL: splatconstant_shift_v16i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $3, %xmm0, %xmm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
+;
 ; X32-SSE-LABEL: splatconstant_shift_v16i8:
 ; X32-SSE:       # BB#0:
 ; X32-SSE-NEXT:    psllw $3, %xmm0
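
The AVX512 checks for var_shift_v16i8 above keep the pre-AVX512 approach: AVX512BW adds a variable per-word shift (vpsllvw) but still no per-byte one, so variable byte shifts use the vpsllw $5 / vpblendvb cascade, applying conditional shifts by 4, 2 and 1 keyed off successive bits of the amount. A scalar model of one byte lane, illustrative only and not code from the patch:

    #include <cassert>
    #include <cstdint>

    // Model of the vpsllw $5 + vpblendvb cascade for a variable per-byte
    // left shift (amounts 0..7). vpblendvb selects by the control byte's
    // sign bit, and vpaddb ctrl,ctrl moves the next amount bit into it.
    uint8_t shl_byte_cascade(uint8_t v, uint8_t amt) {
      uint8_t ctrl = static_cast<uint8_t>(amt << 5);     // vpsllw $5: amount bit 2 into bit 7
      if (ctrl & 0x80) v = static_cast<uint8_t>(v << 4); // blend in v<<4 (vector form also masks cross-byte bits)
      ctrl = static_cast<uint8_t>(ctrl + ctrl);          // vpaddb: amount bit 1 into bit 7
      if (ctrl & 0x80) v = static_cast<uint8_t>(v << 2);
      ctrl = static_cast<uint8_t>(ctrl + ctrl);          // amount bit 0 into bit 7
      if (ctrl & 0x80) v = static_cast<uint8_t>(v + v);  // vpaddb v,v is v<<1 without crossing bytes
      return v;
    }

    int main() {
      for (unsigned v = 0; v < 256; ++v)
        for (unsigned amt = 0; amt < 8; ++amt)
          assert(shl_byte_cascade(static_cast<uint8_t>(v),
                                  static_cast<uint8_t>(amt)) ==
                 static_cast<uint8_t>(v << amt));
      return 0;
    }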

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-256.ll Wed Dec 23 02:06:50 2015
@@ -3,6 +3,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 
 ;
 ; Variable Shifts
@@ -42,6 +43,11 @@ define <4 x i64> @var_shift_v4i64(<4 x i
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvq %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <4 x i64> %a, %b
   ret <4 x i64> %shift
 }
@@ -81,6 +87,11 @@ define <8 x i32> @var_shift_v8i32(<8 x i
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <8 x i32> %a, %b
   ret <8 x i32> %shift
 }
@@ -152,6 +163,11 @@ define <16 x i16> @var_shift_v16i16(<16
 ; XOPAVX2-NEXT:    vpshlw %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
   %shift = shl <16 x i16> %a, %b
   ret <16 x i16> %shift
 }
@@ -220,6 +236,21 @@ define <32 x i8> @var_shift_v32i8(<32 x
 ; XOPAVX2-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: var_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsllw $2, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <32 x i8> %a, %b
   ret <32 x i8> %shift
 }
@@ -254,6 +285,11 @@ define <4 x i64> @splatvar_shift_v4i64(<
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsllq %xmm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllq %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
   %shift = shl <4 x i64> %a, %splat
   ret <4 x i64> %shift
@@ -293,6 +329,13 @@ define <8 x i32> @splatvar_shift_v8i32(<
 ; XOPAVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
 ; XOPAVX2-NEXT:    vpslld %xmm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vmovss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT:    vpslld %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
   %shift = shl <8 x i32> %a, %splat
   ret <8 x i32> %shift
@@ -336,6 +379,14 @@ define <16 x i16> @splatvar_shift_v16i16
 ; XOPAVX2-NEXT:    vmovd %eax, %xmm1
 ; XOPAVX2-NEXT:    vpsllw %xmm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovd %xmm1, %eax
+; AVX512-NEXT:    movzwl %ax, %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vpsllw %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
   %shift = shl <16 x i16> %a, %splat
   ret <16 x i16> %shift
@@ -406,6 +457,22 @@ define <32 x i8> @splatvar_shift_v32i8(<
 ; XOPAVX2-NEXT:    vpshlb %xmm1, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatvar_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpbroadcastb %xmm1, %ymm1
+; AVX512-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsllw $2, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
   %shift = shl <32 x i8> %a, %splat
   ret <32 x i8> %shift
@@ -445,6 +512,11 @@ define <4 x i64> @constant_shift_v4i64(<
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
   ret <4 x i64> %shift
 }
@@ -475,6 +547,11 @@ define <8 x i32> @constant_shift_v8i32(<
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
   ret <8 x i32> %shift
 }
@@ -505,6 +582,12 @@ define <16 x i16> @constant_shift_v16i16
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
   %shift = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
   ret <16 x i16> %shift
 }
@@ -571,6 +654,22 @@ define <32 x i8> @constant_shift_v32i8(<
 ; XOPAVX2-NEXT:    vpshlb %xmm2, %xmm0, %xmm0
 ; XOPAVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: constant_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512-NEXT:    vpsllw $5, %ymm1, %ymm1
+; AVX512-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsllw $2, %ymm0, %ymm2
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512-NEXT:    vpaddb %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
   ret <32 x i8> %shift
 }
@@ -605,6 +704,11 @@ define <4 x i64> @splatconstant_shift_v4
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsllq $7, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v4i64:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllq $7, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
   ret <4 x i64> %shift
 }
@@ -635,6 +739,11 @@ define <8 x i32> @splatconstant_shift_v8
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpslld $5, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v8i32:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpslld $5, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
   ret <8 x i32> %shift
 }
@@ -665,6 +774,11 @@ define <16 x i16> @splatconstant_shift_v
 ; XOPAVX2:       # BB#0:
 ; XOPAVX2-NEXT:    vpsllw $3, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v16i16:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $3, %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
   ret <16 x i16> %shift
 }
@@ -701,6 +815,12 @@ define <32 x i8> @splatconstant_shift_v3
 ; XOPAVX2-NEXT:    vpsllw $3, %ymm0, %ymm0
 ; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
 ; XOPAVX2-NEXT:    retq
+;
+; AVX512-LABEL: splatconstant_shift_v32i8:
+; AVX512:       ## BB#0:
+; AVX512-NEXT:    vpsllw $3, %ymm0, %ymm0
+; AVX512-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512-NEXT:    retq
   %shift = shl <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <32 x i8> %shift
 }
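
The splatconstant byte shifts above, and their zmm forms in the 512-bit file below, are the left-shift analogue of the mask trick: vpsllw $3 on 16-bit lanes followed by an AND with a splat of (0xFF << 3) & 0xFF = 248 (visible as the splat constant in the 512-bit AVX512DQ checks) to drop the bits each byte pushed into its upper neighbour. Again a scalar sketch written for this note, not taken from the patch:

    #include <cassert>
    #include <cstdint>

    // Per-byte left shift by an immediate k via vpsllw $k + vpand/vpandq:
    // shift the whole 16-bit lane, then clear the low k bits of each byte,
    // where the lower byte's overflow landed.
    uint16_t shl_bytes_via_word(uint16_t lane, unsigned k) {
      uint16_t shifted = static_cast<uint16_t>(lane << k);                     // vpsllw $k
      uint16_t mask = static_cast<uint16_t>(0x0101u * ((0xFFu << k) & 0xFFu)); // splat of (0xFF << k), 248 for k=3
      return static_cast<uint16_t>(shifted & mask);                            // vpand / vpandq
    }

    int main() {
      for (unsigned lane = 0; lane <= 0xFFFF; ++lane)
        for (unsigned k = 0; k < 8; ++k) {
          unsigned lo = ((lane & 0xFFu) << k) & 0xFFu;  // per-byte reference result
          unsigned hi = (((lane >> 8) & 0xFFu) << k) & 0xFFu;
          assert(shl_bytes_via_word(static_cast<uint16_t>(lane), k) ==
                 static_cast<uint16_t>((hi << 8) | lo));
        }
      return 0;
    }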

Modified: llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll?rev=256324&r1=256323&r2=256324&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shift-shl-512.ll Wed Dec 23 02:06:50 2015
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; TODO: Add AVX512BW shift support
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
 
 ;
 ; Variable Shifts
@@ -25,60 +25,65 @@ define <16 x i32> @var_shift_v16i32(<16
 }
 
 define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: var_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpxor %ymm4, %ymm4, %ymm4
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsllvd %ymm5, %ymm6, %ymm5
-; ALL-NEXT:    vpsrld $16, %ymm5, %ymm5
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsllvd %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsrld $16, %ymm0, %ymm0
-; ALL-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; ALL-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; ALL-NEXT:    vpsllvd %ymm2, %ymm5, %ymm2
-; ALL-NEXT:    vpsrld $16, %ymm2, %ymm2
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; ALL-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; ALL-NEXT:    vpsllvd %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    vpsrld $16, %ymm1, %ymm1
-; ALL-NEXT:    vpackusdw %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: var_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpxor %ymm4, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsllvd %ymm5, %ymm6, %ymm5
+; AVX512DQ-NEXT:    vpsrld $16, %ymm5, %ymm5
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsllvd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsrld $16, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpackusdw %ymm5, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
+; AVX512DQ-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT:    vpsllvd %ymm2, %ymm5, %ymm2
+; AVX512DQ-NEXT:    vpsrld $16, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT:    vpsllvd %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsrld $16, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: var_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = shl <32 x i16> %a, %b
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: var_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsllw $4, %ymm0, %ymm4
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; ALL-NEXT:    vpand %ymm5, %ymm4, %ymm4
-; ALL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw $2, %ymm0, %ymm4
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; ALL-NEXT:    vpand %ymm6, %ymm4, %ymm4
-; ALL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
-; ALL-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw $4, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT:    vpsllw $5, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsllw $2, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm6, %ymm2, %ymm2
-; ALL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
-; ALL-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: var_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsllw $4, %ymm0, %ymm4
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw $2, %ymm0, %ymm4
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512DQ-NEXT:    vpand %ymm6, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm4
+; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpsllw $5, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsllw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm3, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
   %shift = shl <64 x i8> %a, %b
   ret <64 x i8> %shift
 }
@@ -110,45 +115,54 @@ define <16 x i32> @splatvar_shift_v16i32
 }
 
 define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vmovd %xmm2, %eax
-; ALL-NEXT:    movzwl %ax, %eax
-; ALL-NEXT:    vmovd %eax, %xmm2
-; ALL-NEXT:    vpsllw %xmm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw %xmm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatvar_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vmovd %xmm2, %eax
+; AVX512DQ-NEXT:    movzwl %ax, %eax
+; AVX512DQ-NEXT:    vmovd %eax, %xmm2
+; AVX512DQ-NEXT:    vpsllw %xmm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw %xmm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vmovd %xmm1, %eax
+; AVX512BW-NEXT:    movzwl %ax, %eax
+; AVX512BW-NEXT:    vmovd %eax, %xmm1
+; AVX512BW-NEXT:    vpsllw %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
   %shift = shl <32 x i16> %a, %splat
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
-; ALL-LABEL: splatvar_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpbroadcastb %xmm2, %ymm2
-; ALL-NEXT:    vpsllw $4, %ymm0, %ymm3
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; ALL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpsllw $5, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw $2, %ymm0, %ymm3
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; ALL-NEXT:    vpand %ymm5, %ymm3, %ymm3
-; ALL-NEXT:    vpaddb %ymm2, %ymm2, %ymm6
-; ALL-NEXT:    vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
-; ALL-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
-; ALL-NEXT:    vpaddb %ymm6, %ymm6, %ymm7
-; ALL-NEXT:    vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw $4, %ymm1, %ymm3
-; ALL-NEXT:    vpand %ymm4, %ymm3, %ymm3
-; ALL-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
-; ALL-NEXT:    vpsllw $2, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
-; ALL-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatvar_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpbroadcastb %xmm2, %ymm2
+; AVX512DQ-NEXT:    vpsllw $4, %ymm0, %ymm3
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpsllw $5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw $2, %ymm0, %ymm3
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpaddb %ymm2, %ymm2, %ymm6
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm3
+; AVX512DQ-NEXT:    vpaddb %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw $4, %ymm1, %ymm3
+; AVX512DQ-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT:    vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsllw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+
   %splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
   %shift = shl <64 x i8> %a, %splat
   ret <64 x i8> %shift
@@ -177,42 +191,47 @@ define <16 x i32> @constant_shift_v16i32
 }
 
 define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: constant_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
-; ALL-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: constant_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512DQ-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: constant_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: constant_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsllw $4, %ymm0, %ymm2
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; ALL-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; ALL-NEXT:    vpsllw $5, %ymm4, %ymm4
-; ALL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw $2, %ymm0, %ymm2
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; ALL-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT:    vpaddb %ymm4, %ymm4, %ymm6
-; ALL-NEXT:    vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
-; ALL-NEXT:    vpaddb %ymm6, %ymm6, %ymm7
-; ALL-NEXT:    vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw $4, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpsllw $2, %ymm1, %ymm2
-; ALL-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; ALL-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
-; ALL-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: constant_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsllw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512DQ-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT:    vpsllw $5, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm4, %ymm4, %ymm6
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpaddb %ymm0, %ymm0, %ymm2
+; AVX512DQ-NEXT:    vpaddb %ymm6, %ymm6, %ymm7
+; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw $4, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpsllw $2, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpand %ymm5, %ymm2, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpaddb %ymm1, %ymm1, %ymm2
+; AVX512DQ-NEXT:    vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
   %shift = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
   ret <64 x i8> %shift
 }
@@ -240,24 +259,35 @@ define <16 x i32> @splatconstant_shift_v
 }
 
 define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v32i16:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsllw $3, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw $3, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatconstant_shift_v32i16:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsllw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v32i16:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsllw $3, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = shl <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
   ret <32 x i16> %shift
 }
 
 define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
-; ALL-LABEL: splatconstant_shift_v64i8:
-; ALL:       ## BB#0:
-; ALL-NEXT:    vpsllw $3, %ymm0, %ymm0
-; ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
-; ALL-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; ALL-NEXT:    vpsllw $3, %ymm1, %ymm1
-; ALL-NEXT:    vpand %ymm2, %ymm1, %ymm1
-; ALL-NEXT:    retq
+; AVX512DQ-LABEL: splatconstant_shift_v64i8:
+; AVX512DQ:       ## BB#0:
+; AVX512DQ-NEXT:    vpsllw $3, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm2 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
+; AVX512DQ-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpsllw $3, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT:    retq
+;
+; AVX512BW-LABEL: splatconstant_shift_v64i8:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsllw $3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
   %shift = shl <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <64 x i8> %shift
 }
