[llvm] r336035 - [X86] Remove masking from avx512 rotate intrinsics. Use select in IR instead.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 29 18:32:04 PDT 2018
Author: ctopper
Date: Fri Jun 29 18:32:04 2018
New Revision: 336035
URL: http://llvm.org/viewvc/llvm-project?rev=336035&view=rev
Log:
[X86] Remove masking from avx512 rotate intrinsics. Use select in IR instead.
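The masked forms took the passthru vector and the mask as extra operands; the new unmasked intrinsics drop both, and the masking is expressed with a bitcast of the mask plus a generic select instead. Illustrative IR (value names are made up; the pattern matches the updated fast-isel tests):

  ; before: masking folded into the intrinsic
  %res = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask)

  ; after: unmasked intrinsic plus select
  %rot = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %a, <16 x i32> %b)
  %m = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %m, <16 x i32> %rot, <16 x i32> %passthru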
Modified:
llvm/trunk/include/llvm/IR/IntrinsicsX86.td
llvm/trunk/lib/IR/AutoUpgrade.cpp
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
llvm/trunk/test/CodeGen/X86/avx512-rotate.ll
llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
Modified: llvm/trunk/include/llvm/IR/IntrinsicsX86.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IntrinsicsX86.td?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsX86.td (original)
+++ llvm/trunk/include/llvm/IR/IntrinsicsX86.td Fri Jun 29 18:32:04 2018
@@ -1740,81 +1740,81 @@ let TargetPrefix = "x86" in { // All in
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
[IntrNoMem]>;
- def int_x86_avx512_mask_prorv_d_128 : GCCBuiltin<"__builtin_ia32_prorvd128_mask">,
+ def int_x86_avx512_prorv_d_128 : GCCBuiltin<"__builtin_ia32_prorvd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prorv_d_256 : GCCBuiltin<"__builtin_ia32_prorvd256_mask">,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prorv_d_256 : GCCBuiltin<"__builtin_ia32_prorvd256">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prorv_d_512 : GCCBuiltin<"__builtin_ia32_prorvd512_mask">,
+ llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prorv_d_512 : GCCBuiltin<"__builtin_ia32_prorvd512">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prorv_q_128 : GCCBuiltin<"__builtin_ia32_prorvq128_mask">,
+ llvm_v16i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prorv_q_128 : GCCBuiltin<"__builtin_ia32_prorvq128">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prorv_q_256 : GCCBuiltin<"__builtin_ia32_prorvq256_mask">,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_prorv_q_256 : GCCBuiltin<"__builtin_ia32_prorvq256">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prorv_q_512 : GCCBuiltin<"__builtin_ia32_prorvq512_mask">,
+ llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_prorv_q_512 : GCCBuiltin<"__builtin_ia32_prorvq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ llvm_v8i64_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prol_d_128 : GCCBuiltin<"__builtin_ia32_prold128_mask">,
+ def int_x86_avx512_prol_d_128 : GCCBuiltin<"__builtin_ia32_prold128">,
Intrinsic<[llvm_v4i32_ty] , [llvm_v4i32_ty,
- llvm_i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prol_d_256 : GCCBuiltin<"__builtin_ia32_prold256_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prol_d_256 : GCCBuiltin<"__builtin_ia32_prold256">,
Intrinsic<[llvm_v8i32_ty] , [llvm_v8i32_ty,
- llvm_i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prol_d_512 : GCCBuiltin<"__builtin_ia32_prold512_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prol_d_512 : GCCBuiltin<"__builtin_ia32_prold512">,
Intrinsic<[llvm_v16i32_ty] , [llvm_v16i32_ty,
- llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prol_q_128 : GCCBuiltin<"__builtin_ia32_prolq128_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prol_q_128 : GCCBuiltin<"__builtin_ia32_prolq128">,
Intrinsic<[llvm_v2i64_ty] , [llvm_v2i64_ty,
- llvm_i32_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prol_q_256 : GCCBuiltin<"__builtin_ia32_prolq256_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prol_q_256 : GCCBuiltin<"__builtin_ia32_prolq256">,
Intrinsic<[llvm_v4i64_ty] , [llvm_v4i64_ty,
- llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prol_q_512 : GCCBuiltin<"__builtin_ia32_prolq512_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prol_q_512 : GCCBuiltin<"__builtin_ia32_prolq512">,
Intrinsic<[llvm_v8i64_ty] , [llvm_v8i64_ty,
- llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ llvm_i32_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prolv_d_128 : GCCBuiltin<"__builtin_ia32_prolvd128_mask">,
+ def int_x86_avx512_prolv_d_128 : GCCBuiltin<"__builtin_ia32_prolvd128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prolv_d_256 : GCCBuiltin<"__builtin_ia32_prolvd256_mask">,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prolv_d_256 : GCCBuiltin<"__builtin_ia32_prolvd256">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prolv_d_512 : GCCBuiltin<"__builtin_ia32_prolvd512_mask">,
+ llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prolv_d_512 : GCCBuiltin<"__builtin_ia32_prolvd512">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prolv_q_128 : GCCBuiltin<"__builtin_ia32_prolvq128_mask">,
+ llvm_v16i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_prolv_q_128 : GCCBuiltin<"__builtin_ia32_prolvq128">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prolv_q_256 : GCCBuiltin<"__builtin_ia32_prolvq256_mask">,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_prolv_q_256 : GCCBuiltin<"__builtin_ia32_prolvq256">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_prolv_q_512 : GCCBuiltin<"__builtin_ia32_prolvq512_mask">,
+ llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_prolv_q_512 : GCCBuiltin<"__builtin_ia32_prolvq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pror_d_128 : GCCBuiltin<"__builtin_ia32_prord128_mask">,
+ llvm_v8i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_pror_d_128 : GCCBuiltin<"__builtin_ia32_prord128">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
- llvm_i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pror_d_256 : GCCBuiltin<"__builtin_ia32_prord256_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pror_d_256 : GCCBuiltin<"__builtin_ia32_prord256">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
- llvm_i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pror_d_512 : GCCBuiltin<"__builtin_ia32_prord512_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pror_d_512 : GCCBuiltin<"__builtin_ia32_prord512">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
- llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pror_q_128 : GCCBuiltin<"__builtin_ia32_prorq128_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pror_q_128 : GCCBuiltin<"__builtin_ia32_prorq128">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
- llvm_i32_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pror_q_256 : GCCBuiltin<"__builtin_ia32_prorq256_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pror_q_256 : GCCBuiltin<"__builtin_ia32_prorq256">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
- llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
- def int_x86_avx512_mask_pror_q_512 : GCCBuiltin<"__builtin_ia32_prorq512_mask">,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pror_q_512 : GCCBuiltin<"__builtin_ia32_prorq512">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
- llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ llvm_i32_ty], [IntrNoMem]>;
}
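Net effect on the intrinsic signatures, using the 512-bit rotate-left-by-immediate as an example (declarations as they appear in the test updates below):

  ; old: source, amount, passthru, mask
  declare <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32>, i32, <16 x i32>, i16)
  ; new: source and amount only
  declare <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32>, i32)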
Modified: llvm/trunk/lib/IR/AutoUpgrade.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/AutoUpgrade.cpp?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/lib/IR/AutoUpgrade.cpp (original)
+++ llvm/trunk/lib/IR/AutoUpgrade.cpp Fri Jun 29 18:32:04 2018
@@ -258,6 +258,10 @@ static bool ShouldUpgradeX86Intrinsic(Fu
Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
+ Name.startswith("avx512.mask.prorv.") || // Added in 7.0
+ Name.startswith("avx512.mask.pror.") || // Added in 7.0
+ Name.startswith("avx512.mask.prolv.") || // Added in 7.0
+ Name.startswith("avx512.mask.prol.") || // Added in 7.0
Name == "sse.cvtsi2ss" || // Added in 7.0
Name == "sse.cvtsi642ss" || // Added in 7.0
Name == "sse2.cvtsi2sd" || // Added in 7.0
@@ -1274,6 +1278,66 @@ static bool upgradeAVX512MaskToSelect(St
IID = Intrinsic::x86_avx512_vpshrd_w_512;
else
llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("prorv.")) {
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_prorv_d_128;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_prorv_d_256;
+ else if (VecWidth == 512 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_prorv_d_512;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_prorv_q_128;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_prorv_q_256;
+ else if (VecWidth == 512 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_prorv_q_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("prolv.")) {
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_prolv_d_128;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_prolv_d_256;
+ else if (VecWidth == 512 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_prolv_d_512;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_prolv_q_128;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_prolv_q_256;
+ else if (VecWidth == 512 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_prolv_q_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("pror.")) {
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_pror_d_128;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_pror_d_256;
+ else if (VecWidth == 512 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_pror_d_512;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_pror_q_128;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_pror_q_256;
+ else if (VecWidth == 512 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_pror_q_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
+ } else if (Name.startswith("prol.")) {
+ if (VecWidth == 128 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_prol_d_128;
+ else if (VecWidth == 256 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_prol_d_256;
+ else if (VecWidth == 512 && EltWidth == 32)
+ IID = Intrinsic::x86_avx512_prol_d_512;
+ else if (VecWidth == 128 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_prol_q_128;
+ else if (VecWidth == 256 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_prol_q_256;
+ else if (VecWidth == 512 && EltWidth == 64)
+ IID = Intrinsic::x86_avx512_prol_q_512;
+ else
+ llvm_unreachable("Unexpected intrinsic");
} else
return false;
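The dispatch above only picks the replacement intrinsic ID; the shared upgradeAVX512MaskToSelect path then emits the unmasked call followed by a select against the passthru operand. For the immediate form this looks like (a sketch with illustrative value names):

  ; %res = call <16 x i32> @llvm.x86.avx512.mask.pror.d.512(<16 x i32> %a, i32 3, <16 x i32> %passthru, i16 %mask)
  ; upgrades to:
  %rot = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %a, i32 3)
  %m = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %m, <16 x i32> %rot, <16 x i32> %passthru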
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Fri Jun 29 18:32:04 2018
@@ -20411,7 +20411,13 @@ SDValue X86TargetLowering::LowerINTRINSI
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
}
- case INTR_TYPE_2OP: {
+ case INTR_TYPE_2OP:
+ case INTR_TYPE_2OP_IMM8: {
+ SDValue Src2 = Op.getOperand(2);
+
+ if (IntrData->Type == INTR_TYPE_2OP_IMM8)
+ Src2 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src2);
+
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have non-default rounding mode,
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
@@ -20420,12 +20426,12 @@ SDValue X86TargetLowering::LowerINTRINSI
SDValue Rnd = Op.getOperand(3);
if (!isRoundModeCurDirection(Rnd)) {
return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2), Rnd);
+ Op.getOperand(1), Src2, Rnd);
}
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
- Op.getOperand(1), Op.getOperand(2));
+ Op.getOperand(1), Src2);
}
case INTR_TYPE_3OP:
case INTR_TYPE_3OP_IMM8: {
@@ -20537,16 +20543,12 @@ SDValue X86TargetLowering::LowerINTRINSI
RoundingMode, Sae),
Mask, Src0, Subtarget, DAG);
}
- case INTR_TYPE_2OP_MASK:
- case INTR_TYPE_2OP_IMM8_MASK: {
+ case INTR_TYPE_2OP_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue PassThru = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
- if (IntrData->Type == INTR_TYPE_2OP_IMM8_MASK)
- Src2 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src2);
-
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have non-default rounding mode,
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
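The new INTR_TYPE_2OP_IMM8 type exists because the rotate-by-immediate intrinsics carry an i32 amount in IR while the target nodes expect an i8, so lowering truncates the operand, just as the removed INTR_TYPE_2OP_IMM8_MASK path did. For example (pattern from the fast-isel tests):

  %r = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %v, i32 5)
  ; the i32 amount is truncated to i8 during lowering and this selects vprolq $5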
Modified: llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h (original)
+++ llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h Fri Jun 29 18:32:04 2018
@@ -22,11 +22,11 @@ namespace llvm {
enum IntrinsicType : uint16_t {
GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST, XGETBV, ADX, FPCLASS, FPCLASSS,
INTR_TYPE_1OP, INTR_TYPE_2OP, INTR_TYPE_3OP, INTR_TYPE_4OP,
- INTR_TYPE_1OP_RM, INTR_TYPE_3OP_RM, INTR_TYPE_3OP_IMM8,
+ INTR_TYPE_1OP_RM, INTR_TYPE_2OP_IMM8, INTR_TYPE_3OP_RM, INTR_TYPE_3OP_IMM8,
CMP_MASK, CMP_MASK_CC,CMP_MASK_SCALAR_CC, VSHIFT, COMI, COMI_RM,
CVTPD2PS, CVTPD2PS_MASK,
INTR_TYPE_1OP_MASK, INTR_TYPE_1OP_MASK_RM,
- INTR_TYPE_2OP_MASK, INTR_TYPE_2OP_MASK_RM, INTR_TYPE_2OP_IMM8_MASK,
+ INTR_TYPE_2OP_MASK, INTR_TYPE_2OP_MASK_RM,
INTR_TYPE_3OP_MASK,
FMA_OP_MASK, FMA_OP_MASKZ, FMA_OP_MASK3,
FMA_OP_SCALAR_MASK, FMA_OP_SCALAR_MASKZ, FMA_OP_SCALAR_MASK3,
@@ -804,30 +804,6 @@ static const IntrinsicData IntrinsicsWi
X86ISD::MULTISHIFT, 0),
X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_512, INTR_TYPE_2OP_MASK,
X86ISD::MULTISHIFT, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_d_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_d_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_d_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_q_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_q_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prol_q_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prolv_d_128, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
- X86_INTRINSIC_DATA(avx512_mask_prolv_d_256, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
- X86_INTRINSIC_DATA(avx512_mask_prolv_d_512, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
- X86_INTRINSIC_DATA(avx512_mask_prolv_q_128, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
- X86_INTRINSIC_DATA(avx512_mask_prolv_q_256, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
- X86_INTRINSIC_DATA(avx512_mask_prolv_q_512, INTR_TYPE_2OP_MASK, ISD::ROTL, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_d_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_d_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_d_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_q_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_q_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_pror_q_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTRI, 0),
- X86_INTRINSIC_DATA(avx512_mask_prorv_d_128, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
- X86_INTRINSIC_DATA(avx512_mask_prorv_d_256, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
- X86_INTRINSIC_DATA(avx512_mask_prorv_d_512, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
- X86_INTRINSIC_DATA(avx512_mask_prorv_q_128, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
- X86_INTRINSIC_DATA(avx512_mask_prorv_q_256, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
- X86_INTRINSIC_DATA(avx512_mask_prorv_q_512, INTR_TYPE_2OP_MASK, ISD::ROTR, 0),
X86_INTRINSIC_DATA(avx512_mask_psubs_b_128, INTR_TYPE_2OP_MASK, X86ISD::SUBS, 0),
X86_INTRINSIC_DATA(avx512_mask_psubs_b_256, INTR_TYPE_2OP_MASK, X86ISD::SUBS, 0),
X86_INTRINSIC_DATA(avx512_mask_psubs_b_512, INTR_TYPE_2OP_MASK, X86ISD::SUBS, 0),
@@ -1098,6 +1074,30 @@ static const IntrinsicData IntrinsicsWi
X86_INTRINSIC_DATA(avx512_pmul_hr_sw_512, INTR_TYPE_2OP, X86ISD::MULHRS, 0),
X86_INTRINSIC_DATA(avx512_pmulh_w_512, INTR_TYPE_2OP, ISD::MULHS, 0),
X86_INTRINSIC_DATA(avx512_pmulhu_w_512, INTR_TYPE_2OP, ISD::MULHU, 0),
+ X86_INTRINSIC_DATA(avx512_prol_d_128, INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_prol_d_256, INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_prol_d_512, INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_prol_q_128, INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_prol_q_256, INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_prol_q_512, INTR_TYPE_2OP_IMM8, X86ISD::VROTLI, 0),
+ X86_INTRINSIC_DATA(avx512_prolv_d_128, INTR_TYPE_2OP, ISD::ROTL, 0),
+ X86_INTRINSIC_DATA(avx512_prolv_d_256, INTR_TYPE_2OP, ISD::ROTL, 0),
+ X86_INTRINSIC_DATA(avx512_prolv_d_512, INTR_TYPE_2OP, ISD::ROTL, 0),
+ X86_INTRINSIC_DATA(avx512_prolv_q_128, INTR_TYPE_2OP, ISD::ROTL, 0),
+ X86_INTRINSIC_DATA(avx512_prolv_q_256, INTR_TYPE_2OP, ISD::ROTL, 0),
+ X86_INTRINSIC_DATA(avx512_prolv_q_512, INTR_TYPE_2OP, ISD::ROTL, 0),
+ X86_INTRINSIC_DATA(avx512_pror_d_128, INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_pror_d_256, INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_pror_d_512, INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_pror_q_128, INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_pror_q_256, INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_pror_q_512, INTR_TYPE_2OP_IMM8, X86ISD::VROTRI, 0),
+ X86_INTRINSIC_DATA(avx512_prorv_d_128, INTR_TYPE_2OP, ISD::ROTR, 0),
+ X86_INTRINSIC_DATA(avx512_prorv_d_256, INTR_TYPE_2OP, ISD::ROTR, 0),
+ X86_INTRINSIC_DATA(avx512_prorv_d_512, INTR_TYPE_2OP, ISD::ROTR, 0),
+ X86_INTRINSIC_DATA(avx512_prorv_q_128, INTR_TYPE_2OP, ISD::ROTR, 0),
+ X86_INTRINSIC_DATA(avx512_prorv_q_256, INTR_TYPE_2OP, ISD::ROTR, 0),
+ X86_INTRINSIC_DATA(avx512_prorv_q_512, INTR_TYPE_2OP, ISD::ROTR, 0),
X86_INTRINSIC_DATA(avx512_psad_bw_512, INTR_TYPE_2OP, X86ISD::PSADBW, 0),
X86_INTRINSIC_DATA(avx512_pshuf_b_512, INTR_TYPE_2OP, X86ISD::PSHUFB, 0),
X86_INTRINSIC_DATA(avx512_psll_d_512, INTR_TYPE_2OP, X86ISD::VSHL, 0),
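Note the split in the table: the variable rotates map to the generic ISD::ROTL/ROTR opcodes, while the immediate forms use the X86-specific VROTLI/VROTRI nodes with the IMM8 handling above. E.g. (pattern from the tests):

  %r = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %v, <8 x i64> %amt)
  ; maps to ISD::ROTL and selects vprolvq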
Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll Fri Jun 29 18:32:04 2018
@@ -9222,6 +9222,440 @@ entry:
ret <16 x float> %0
}
+define <8 x i64> @test_mm512_rol_epi32(<8 x i64> %__A) local_unnamed_addr #0 {
+; CHECK-LABEL: test_mm512_rol_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprold $5, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %0, i32 5)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32>, i32) #1
+
+define <8 x i64> @test_mm512_mask_rol_epi32(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A) {
+; X86-LABEL: test_mm512_mask_rol_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vprold $5, %zmm1, %zmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_rol_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprold $5, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %0, i32 5)
+ %2 = bitcast <8 x i64> %__W to <16 x i32>
+ %3 = bitcast i16 %__U to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i32> %1, <16 x i32> %2
+ %5 = bitcast <16 x i32> %4 to <8 x i64>
+ ret <8 x i64> %5
+}
+
+define <8 x i64> @test_mm512_maskz_rol_epi32(i16 zeroext %__U, <8 x i64> %__A) {
+; X86-LABEL: test_mm512_maskz_rol_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vprold $5, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_rol_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprold $5, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %0, i32 5)
+ %2 = bitcast i16 %__U to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> zeroinitializer
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_rol_epi64(<8 x i64> %__A) {
+; CHECK-LABEL: test_mm512_rol_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprolq $5, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %__A, i32 5)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64>, i32) #1
+
+define <8 x i64> @test_mm512_mask_rol_epi64(<8 x i64> %__W, i8 zeroext %__U, <8 x i64> %__A) {
+; X86-LABEL: test_mm512_mask_rol_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolq $5, %zmm1, %zmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_rol_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolq $5, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__W
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_maskz_rol_epi64(i8 zeroext %__U, <8 x i64> %__A) {
+; X86-LABEL: test_mm512_maskz_rol_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolq $5, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_rol_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolq $5, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_rolv_epi32(<8 x i64> %__A, <8 x i64> %__B) {
+; CHECK-LABEL: test_mm512_rolv_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprolvd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %0, <16 x i32> %1)
+ %3 = bitcast <16 x i32> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_mask_rolv_epi32(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X86-LABEL: test_mm512_mask_rolv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vprolvd %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_rolv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvd %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %0, <16 x i32> %1)
+ %3 = bitcast <8 x i64> %__W to <16 x i32>
+ %4 = bitcast i16 %__U to <16 x i1>
+ %5 = select <16 x i1> %4, <16 x i32> %2, <16 x i32> %3
+ %6 = bitcast <16 x i32> %5 to <8 x i64>
+ ret <8 x i64> %6
+}
+
+define <8 x i64> @test_mm512_maskz_rolv_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X86-LABEL: test_mm512_maskz_rolv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vprolvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_rolv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %0, <16 x i32> %1)
+ %3 = bitcast i16 %__U to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
+ %5 = bitcast <16 x i32> %4 to <8 x i64>
+ ret <8 x i64> %5
+}
+
+define <8 x i64> @test_mm512_rolv_epi64(<8 x i64> %__A, <8 x i64> %__B) {
+; CHECK-LABEL: test_mm512_rolv_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprolvq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_mask_rolv_epi64(<8 x i64> %__W, i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X86-LABEL: test_mm512_mask_rolv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvq %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_rolv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvq %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__W
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_maskz_rolv_epi64(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X86-LABEL: test_mm512_maskz_rolv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_rolv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_ror_epi32(<8 x i64> %__A) {
+; CHECK-LABEL: test_mm512_ror_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprord $5, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %0, i32 5)
+ %2 = bitcast <16 x i32> %1 to <8 x i64>
+ ret <8 x i64> %2
+}
+
+declare <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32>, i32) #1
+
+define <8 x i64> @test_mm512_mask_ror_epi32(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A) {
+; X86-LABEL: test_mm512_mask_ror_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vprord $5, %zmm1, %zmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_ror_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprord $5, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %0, i32 5)
+ %2 = bitcast <8 x i64> %__W to <16 x i32>
+ %3 = bitcast i16 %__U to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i32> %1, <16 x i32> %2
+ %5 = bitcast <16 x i32> %4 to <8 x i64>
+ ret <8 x i64> %5
+}
+
+define <8 x i64> @test_mm512_maskz_ror_epi32(i16 zeroext %__U, <8 x i64> %__A) {
+; X86-LABEL: test_mm512_maskz_ror_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vprord $5, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_ror_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprord $5, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = tail call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %0, i32 5)
+ %2 = bitcast i16 %__U to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> zeroinitializer
+ %4 = bitcast <16 x i32> %3 to <8 x i64>
+ ret <8 x i64> %4
+}
+
+define <8 x i64> @test_mm512_ror_epi64(<8 x i64> %__A) {
+; CHECK-LABEL: test_mm512_ror_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprorq $5, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %__A, i32 5)
+ ret <8 x i64> %0
+}
+
+declare <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64>, i32) #1
+
+define <8 x i64> @test_mm512_mask_ror_epi64(<8 x i64> %__W, i8 zeroext %__U, <8 x i64> %__A) {
+; X86-LABEL: test_mm512_mask_ror_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorq $5, %zmm1, %zmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_ror_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorq $5, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__W
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_maskz_ror_epi64(i8 zeroext %__U, <8 x i64> %__A) {
+; X86-LABEL: test_mm512_maskz_ror_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorq $5, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_ror_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorq $5, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_rorv_epi32(<8 x i64> %__A, <8 x i64> %__B) {
+; CHECK-LABEL: test_mm512_rorv_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %0, <16 x i32> %1)
+ %3 = bitcast <16 x i32> %2 to <8 x i64>
+ ret <8 x i64> %3
+}
+
+define <8 x i64> @test_mm512_mask_rorv_epi32(<8 x i64> %__W, i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X86-LABEL: test_mm512_mask_rorv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vprorvd %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_rorv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvd %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %0, <16 x i32> %1)
+ %3 = bitcast <8 x i64> %__W to <16 x i32>
+ %4 = bitcast i16 %__U to <16 x i1>
+ %5 = select <16 x i1> %4, <16 x i32> %2, <16 x i32> %3
+ %6 = bitcast <16 x i32> %5 to <8 x i64>
+ ret <8 x i64> %6
+}
+
+define <8 x i64> @test_mm512_maskz_rorv_epi32(i16 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X86-LABEL: test_mm512_maskz_rorv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT: vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_rorv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__A to <16 x i32>
+ %1 = bitcast <8 x i64> %__B to <16 x i32>
+ %2 = tail call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %0, <16 x i32> %1)
+ %3 = bitcast i16 %__U to <16 x i1>
+ %4 = select <16 x i1> %3, <16 x i32> %2, <16 x i32> zeroinitializer
+ %5 = bitcast <16 x i32> %4 to <8 x i64>
+ ret <8 x i64> %5
+}
+
+define <8 x i64> @test_mm512_rorv_epi64(<8 x i64> %__A, <8 x i64> %__B) {
+; CHECK-LABEL: test_mm512_rorv_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+ ret <8 x i64> %0
+}
+
+define <8 x i64> @test_mm512_mask_rorv_epi64(<8 x i64> %__W, i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X86-LABEL: test_mm512_mask_rorv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvq %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_rorv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvq %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> %__W
+ ret <8 x i64> %2
+}
+
+define <8 x i64> @test_mm512_maskz_rorv_epi64(i8 zeroext %__U, <8 x i64> %__A, <8 x i64> %__B) {
+; X86-LABEL: test_mm512_maskz_rorv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_rorv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %__A, <8 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %2 = select <8 x i1> %1, <8 x i64> %0, <8 x i64> zeroinitializer
+ ret <8 x i64> %2
+}
+
declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>) #9
declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>) #9
declare float @llvm.fma.f32(float, float, float) #9
@@ -9244,6 +9678,10 @@ declare <8 x float> @llvm.x86.avx.min.ps
declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>)
declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
+declare <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32>, <16 x i32>)
+declare <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64>, <8 x i64>)
+declare <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32>, <16 x i32>)
+declare <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64>, <8 x i64>)
!0 = !{i32 1}
Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll Fri Jun 29 18:32:04 2018
@@ -8540,3 +8540,247 @@ define <16 x float> @test_maskz_sqrt_rou
ret <16 x float> %res
}
declare <16 x float> @llvm.x86.avx512.mask.sqrt.ps.512(<16 x float>, <16 x float>, i16, i32) nounwind readnone
+
+declare <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+
+define <16 x i32>@test_int_x86_avx512_mask_prolv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prolv_d_512:
+; X86: ## %bb.0:
+; X86-NEXT: vprolvd %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0x15,0xd9]
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vprolvd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x15,0xd1]
+; X86-NEXT: vprolvd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x15,0xc1]
+; X86-NEXT: vpaddd %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc3]
+; X86-NEXT: vpaddd %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc0]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prolv_d_512:
+; X64: ## %bb.0:
+; X64-NEXT: vprolvd %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprolvd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x15,0xd1]
+; X64-NEXT: vprolvd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x15,0xc1]
+; X64-NEXT: vpaddd %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc3]
+; X64-NEXT: vpaddd %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc0]
+; X64-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
+ %res1 = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> zeroinitializer, i16 %x3)
+ %res2 = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
+ %res3 = add <16 x i32> %res, %res1
+ %res4 = add <16 x i32> %res3, %res2
+ ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_mask_prolv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prolv_q_512:
+; X86: ## %bb.0:
+; X86-NEXT: vprolvq %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0xfd,0x48,0x15,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprolvq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x15,0xd1]
+; X86-NEXT: vprolvq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x15,0xc1]
+; X86-NEXT: vpaddq %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc3]
+; X86-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc0]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prolv_q_512:
+; X64: ## %bb.0:
+; X64-NEXT: vprolvq %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0xfd,0x48,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprolvq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x15,0xd1]
+; X64-NEXT: vprolvq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x15,0xc1]
+; X64-NEXT: vpaddq %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc3]
+; X64-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc0]
+; X64-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
+ %res3 = add <8 x i64> %res, %res1
+ %res4 = add <8 x i64> %res3, %res2
+ ret <8 x i64> %res4
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+
+define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prorv_d_512:
+; X86: ## %bb.0:
+; X86-NEXT: vprorvd %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0x14,0xd9]
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT: vprorvd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x14,0xd1]
+; X86-NEXT: vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x14,0xc1]
+; X86-NEXT: vpaddd %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc3]
+; X86-NEXT: vpaddd %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc0]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prorv_d_512:
+; X64: ## %bb.0:
+; X64-NEXT: vprorvd %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0x7d,0x48,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprorvd %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x14,0xd1]
+; X64-NEXT: vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x14,0xc1]
+; X64-NEXT: vpaddd %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc3]
+; X64-NEXT: vpaddd %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0x6d,0x48,0xfe,0xc0]
+; X64-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
+ %res1 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> zeroinitializer, i16 %x3)
+ %res2 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
+ %res3 = add <16 x i32> %res, %res1
+ %res4 = add <16 x i32> %res3, %res2
+ ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prorv_q_512:
+; X86: ## %bb.0:
+; X86-NEXT: vprorvq %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0xfd,0x48,0x14,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprorvq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x14,0xd1]
+; X86-NEXT: vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x14,0xc1]
+; X86-NEXT: vpaddq %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc3]
+; X86-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc0]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prorv_q_512:
+; X64: ## %bb.0:
+; X64-NEXT: vprorvq %zmm1, %zmm0, %zmm3 ## encoding: [0x62,0xf2,0xfd,0x48,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprorvq %zmm1, %zmm0, %zmm2 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x14,0xd1]
+; X64-NEXT: vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xc9,0x14,0xc1]
+; X64-NEXT: vpaddq %zmm3, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc3]
+; X64-NEXT: vpaddq %zmm0, %zmm2, %zmm0 ## encoding: [0x62,0xf1,0xed,0x48,0xd4,0xc0]
+; X64-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
+ %res3 = add <8 x i64> %res, %res1
+ %res4 = add <8 x i64> %res3, %res2
+ ret <8 x i64> %res4
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32>, i32, <16 x i32>, i16)
+
+define <16 x i32>@test_int_x86_avx512_mask_prol_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prol_d_512:
+; X86: ## %bb.0:
+; X86-NEXT: vprold $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0x72,0xc8,0x03]
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT: vprold $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x49,0x72,0xc8,0x03]
+; X86-NEXT: vprold $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x72,0xc8,0x03]
+; X86-NEXT: vpaddd %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc2]
+; X86-NEXT: vpaddd %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x75,0x48,0xfe,0xc0]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prol_d_512:
+; X64: ## %bb.0:
+; X64-NEXT: vprold $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0x72,0xc8,0x03]
+; X64-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprold $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x49,0x72,0xc8,0x03]
+; X64-NEXT: vprold $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x72,0xc8,0x03]
+; X64-NEXT: vpaddd %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc2]
+; X64-NEXT: vpaddd %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x75,0x48,0xfe,0xc0]
+; X64-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3)
+ %res1 = call <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3)
+ %res2 = call <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1)
+ %res3 = add <16 x i32> %res, %res1
+ %res4 = add <16 x i32> %res3, %res2
+ ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64>, i32, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_mask_prol_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prol_q_512:
+; X86: ## %bb.0:
+; X86-NEXT: vprolq $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0xed,0x48,0x72,0xc8,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprolq $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0x72,0xc8,0x03]
+; X86-NEXT: vprolq $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x72,0xc8,0x03]
+; X86-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc2]
+; X86-NEXT: vpaddq %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xf5,0x48,0xd4,0xc0]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prol_q_512:
+; X64: ## %bb.0:
+; X64-NEXT: vprolq $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0xed,0x48,0x72,0xc8,0x03]
+; X64-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprolq $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0x72,0xc8,0x03]
+; X64-NEXT: vprolq $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x72,0xc8,0x03]
+; X64-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc2]
+; X64-NEXT: vpaddq %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xf5,0x48,0xd4,0xc0]
+; X64-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 -1)
+ %res3 = add <8 x i64> %res, %res1
+ %res4 = add <8 x i64> %res3, %res2
+ ret <8 x i64> %res4
+}
+
+declare <16 x i32> @llvm.x86.avx512.mask.pror.d.512(<16 x i32>, i32, <16 x i32>, i16)
+
+define <16 x i32>@test_int_x86_avx512_mask_pror_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_pror_d_512:
+; X86: ## %bb.0:
+; X86-NEXT: vprord $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0x72,0xc0,0x03]
+; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 ## encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x08]
+; X86-NEXT: vprord $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x49,0x72,0xc0,0x03]
+; X86-NEXT: vprord $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x72,0xc0,0x03]
+; X86-NEXT: vpaddd %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc2]
+; X86-NEXT: vpaddd %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x75,0x48,0xfe,0xc0]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_pror_d_512:
+; X64: ## %bb.0:
+; X64-NEXT: vprord $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0x72,0xc0,0x03]
+; X64-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprord $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x49,0x72,0xc0,0x03]
+; X64-NEXT: vprord $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xc9,0x72,0xc0,0x03]
+; X64-NEXT: vpaddd %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfe,0xc2]
+; X64-NEXT: vpaddd %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0x75,0x48,0xfe,0xc0]
+; X64-NEXT: retq ## encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx512.mask.pror.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3)
+ %res1 = call <16 x i32> @llvm.x86.avx512.mask.pror.d.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3)
+ %res2 = call <16 x i32> @llvm.x86.avx512.mask.pror.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1)
+ %res3 = add <16 x i32> %res, %res1
+ %res4 = add <16 x i32> %res3, %res2
+ ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.pror.q.512(<8 x i64>, i32, <8 x i64>, i8)
+
+define <8 x i64>@test_int_x86_avx512_mask_pror_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_pror_q_512:
+; X86: ## %bb.0:
+; X86-NEXT: vprorq $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0xed,0x48,0x72,0xc0,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ## encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 ## encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprorq $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0x72,0xc0,0x03]
+; X86-NEXT: vprorq $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x72,0xc0,0x03]
+; X86-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc2]
+; X86-NEXT: vpaddq %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xf5,0x48,0xd4,0xc0]
+; X86-NEXT: retl ## encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_pror_q_512:
+; X64: ## %bb.0:
+; X64-NEXT: vprorq $3, %zmm0, %zmm2 ## encoding: [0x62,0xf1,0xed,0x48,0x72,0xc0,0x03]
+; X64-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprorq $3, %zmm0, %zmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0x72,0xc0,0x03]
+; X64-NEXT: vprorq $3, %zmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xc9,0x72,0xc0,0x03]
+; X64-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0xfd,0x48,0xd4,0xc2]
+; X64-NEXT: vpaddq %zmm0, %zmm1, %zmm0 ## encoding: [0x62,0xf1,0xf5,0x48,0xd4,0xc0]
+; X64-NEXT: retq ## encoding: [0xc3]
+ %res = call <8 x i64> @llvm.x86.avx512.mask.pror.q.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3)
+ %res1 = call <8 x i64> @llvm.x86.avx512.mask.pror.q.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i64> @llvm.x86.avx512.mask.pror.q.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 -1)
+ %res3 = add <8 x i64> %res, %res1
+ %res4 = add <8 x i64> %res3, %res2
+ ret <8 x i64> %res4
+}
Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll Fri Jun 29 18:32:04 2018
@@ -3849,83 +3849,195 @@ define i32 @test_x86_avx512_ucomi_ss_lt(
declare i32 @llvm.x86.avx512.vcomi.ss(<4 x float>, <4 x float>, i32, i32)
-declare <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
+declare <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32>, <16 x i32>)
+
+define <16 x i32>@test_int_x86_avx512_mask_prolv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_prolv_d_512:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vprolvd %zmm1, %zmm0, %zmm3
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vprolvd %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vprolvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %zmm3, %zmm0, %zmm0
+; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %1 = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+ %2 = bitcast i16 %x3 to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
+ %4 = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+ %5 = bitcast i16 %x3 to <16 x i1>
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
+ %7 = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+ %res3 = add <16 x i32> %3, %6
+ %res4 = add <16 x i32> %res3, %7
+ ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64>, <8 x i64>)
+
+define <8 x i64>@test_int_x86_avx512_mask_prolv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_prolv_q_512:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vprolvq %zmm1, %zmm0, %zmm3
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vprolvq %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vprolvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm3, %zmm0, %zmm0
+; CHECK-NEXT: vpaddq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: retq
+ %1 = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
+ %4 = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
+ %7 = call <8 x i64> @llvm.x86.avx512.prolv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+ %res3 = add <8 x i64> %3, %6
+ %res4 = add <8 x i64> %res3, %7
+ ret <8 x i64> %res4
+}
+
+declare <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32>, <16 x i32>)
define <16 x i32>@test_int_x86_avx512_mask_prorv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_d_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vprorvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm3, %zmm0, %zmm0
; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
- %res1 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> zeroinitializer, i16 %x3)
- %res2 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
- %res3 = add <16 x i32> %res, %res1
- %res4 = add <16 x i32> %res3, %res2
+ %1 = call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+ %2 = bitcast i16 %x3 to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
+ %4 = call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+ %5 = bitcast i16 %x3 to <16 x i1>
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
+ %7 = call <16 x i32> @llvm.x86.avx512.prorv.d.512(<16 x i32> %x0, <16 x i32> %x1)
+ %res3 = add <16 x i32> %3, %6
+ %res4 = add <16 x i32> %res3, %7
ret <16 x i32> %res4
}
-declare <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
+declare <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64>, <8 x i64>)
define <8 x i64>@test_int_x86_avx512_mask_prorv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prorv_q_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vprorvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm3, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
- %res = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
- %res1 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer, i8 %x3)
- %res2 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
- %res3 = add <8 x i64> %res, %res1
- %res4 = add <8 x i64> %res3, %res2
+ %1 = call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
+ %4 = call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
+ %7 = call <8 x i64> @llvm.x86.avx512.prorv.q.512(<8 x i64> %x0, <8 x i64> %x1)
+ %res3 = add <8 x i64> %3, %6
+ %res4 = add <8 x i64> %res3, %7
ret <8 x i64> %res4
}
-declare <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32>, i32, <16 x i32>, i16)
+declare <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32>, i32)
define <16 x i32>@test_int_x86_avx512_mask_prol_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prol_d_512:
; CHECK: ## %bb.0:
+; CHECK-NEXT: vprold $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vprold $3, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vprold $3, %zmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT: vprold $3, %zmm0, %zmm0
-; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vprold $3, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
- %res = call <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3)
- %res1 = call <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3)
- %res2 = call <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1)
- %res3 = add <16 x i32> %res, %res1
- %res4 = add <16 x i32> %res3, %res2
+ %1 = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x0, i32 3)
+ %2 = bitcast i16 %x3 to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
+ %4 = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x0, i32 3)
+ %5 = bitcast i16 %x3 to <16 x i1>
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
+ %7 = call <16 x i32> @llvm.x86.avx512.prol.d.512(<16 x i32> %x0, i32 3)
+ %res3 = add <16 x i32> %3, %6
+ %res4 = add <16 x i32> %res3, %7
ret <16 x i32> %res4
}
-declare <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64>, i32, <8 x i64>, i8)
+declare <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64>, i32)
define <8 x i64>@test_int_x86_avx512_mask_prol_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_prol_q_512:
; CHECK: ## %bb.0:
+; CHECK-NEXT: vprolq $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vprolq $3, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vprolq $3, %zmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT: vprolq $3, %zmm0, %zmm0
-; CHECK-NEXT: vpaddq %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vprolq $3, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
+; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %1 = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
+ %4 = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
+ %7 = call <8 x i64> @llvm.x86.avx512.prol.q.512(<8 x i64> %x0, i32 3)
+ %res3 = add <8 x i64> %3, %6
+ %res4 = add <8 x i64> %res3, %7
+ ret <8 x i64> %res4
+}
+
+declare <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32>, i32)
+
+define <16 x i32>@test_int_x86_avx512_mask_pror_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_pror_d_512:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vprord $3, %zmm0, %zmm2
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vprord $3, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vprord $3, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
+; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %1 = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %x0, i32 3)
+ %2 = bitcast i16 %x3 to <16 x i1>
+ %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
+ %4 = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %x0, i32 3)
+ %5 = bitcast i16 %x3 to <16 x i1>
+ %6 = select <16 x i1> %5, <16 x i32> %4, <16 x i32> zeroinitializer
+ %7 = call <16 x i32> @llvm.x86.avx512.pror.d.512(<16 x i32> %x0, i32 3)
+ %res3 = add <16 x i32> %3, %6
+ %res4 = add <16 x i32> %res3, %7
+ ret <16 x i32> %res4
+}
+
+declare <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64>, i32)
+
+define <8 x i64>@test_int_x86_avx512_mask_pror_q_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
+; CHECK-LABEL: test_int_x86_avx512_mask_pror_q_512:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vprorq $3, %zmm0, %zmm2
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vprorq $3, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vprorq $3, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
- %res = call <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3)
- %res1 = call <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3)
- %res2 = call <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 -1)
- %res3 = add <8 x i64> %res, %res1
- %res4 = add <8 x i64> %res3, %res2
+ %1 = call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
+ %4 = call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %6 = select <8 x i1> %5, <8 x i64> %4, <8 x i64> zeroinitializer
+ %7 = call <8 x i64> @llvm.x86.avx512.pror.q.512(<8 x i64> %x0, i32 3)
+ %res3 = add <8 x i64> %3, %6
+ %res4 = add <8 x i64> %res3, %7
ret <8 x i64> %res4
}
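
For reference, every rewritten test above follows the same shape: the old masked rotate intrinsic becomes a call to the unmasked intrinsic, a bitcast of the integer mask to a vector of i1, and a select against the passthru operand (the zero-masked variant only swaps the select's false operand for zeroinitializer). A minimal standalone sketch of the merge-masked v16i32 case, with a hypothetical function name:

define <16 x i32> @rol_merge_sketch(<16 x i32> %src, <16 x i32> %amt, <16 x i32> %passthru, i16 %mask) {
  ; unmasked variable rotate-left
  %r = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %src, <16 x i32> %amt)
  ; i16 mask -> one i1 per 32-bit lane
  %m = bitcast i16 %mask to <16 x i1>
  ; merge masking: keep rotated lanes where the mask bit is set
  %res = select <16 x i1> %m, <16 x i32> %r, <16 x i32> %passthru
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32>, <16 x i32>)

Keeping the select as generic IR rather than baking the mask into the intrinsic is what lets the backend still fold it into the {%k1} / {%k1} {z} encodings seen in the CHECK lines.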
Modified: llvm/trunk/test/CodeGen/X86/avx512-rotate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-rotate.ll?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-rotate.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-rotate.ll Fri Jun 29 18:32:04 2018
@@ -15,22 +15,22 @@ declare <16 x i32> @llvm.x86.avx512.mask
define <16 x i32> @test_splat_rol_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; KNL-LABEL: test_splat_rol_v16i32:
; KNL: # %bb.0:
+; KNL-NEXT: vprold $5, %zmm0, %zmm2
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprold $5, %zmm0, %zmm1 {%k1}
-; KNL-NEXT: vprold $5, %zmm0, %zmm2 {%k1} {z}
-; KNL-NEXT: vpaddd %zmm2, %zmm1, %zmm1
-; KNL-NEXT: vprold $5, %zmm0, %zmm0
+; KNL-NEXT: vprold $5, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; KNL-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_rol_v16i32:
; SKX: # %bb.0:
+; SKX-NEXT: vprold $5, %zmm0, %zmm2
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprold $5, %zmm0, %zmm1 {%k1}
-; SKX-NEXT: vprold $5, %zmm0, %zmm2 {%k1} {z}
-; SKX-NEXT: vpaddd %zmm2, %zmm1, %zmm1
-; SKX-NEXT: vprold $5, %zmm0, %zmm0
+; SKX-NEXT: vprold $5, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; SKX-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; SKX-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> %x1, i16 %x2)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.prolv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> zeroinitializer, i16 %x2)
@@ -43,22 +43,22 @@ define <16 x i32> @test_splat_rol_v16i32
define <8 x i64>@test_splat_rol_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; KNL-LABEL: test_splat_rol_v8i64:
; KNL: # %bb.0:
+; KNL-NEXT: vprolq $5, %zmm0, %zmm2
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprolq $5, %zmm0, %zmm1 {%k1}
-; KNL-NEXT: vprolq $5, %zmm0, %zmm2 {%k1} {z}
-; KNL-NEXT: vpaddq %zmm2, %zmm1, %zmm1
-; KNL-NEXT: vprolq $5, %zmm0, %zmm0
+; KNL-NEXT: vprolq $5, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; KNL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_rol_v8i64:
; SKX: # %bb.0:
+; SKX-NEXT: vprolq $5, %zmm0, %zmm2
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprolq $5, %zmm0, %zmm1 {%k1}
-; SKX-NEXT: vprolq $5, %zmm0, %zmm2 {%k1} {z}
-; SKX-NEXT: vpaddq %zmm2, %zmm1, %zmm1
-; SKX-NEXT: vprolq $5, %zmm0, %zmm0
+; SKX-NEXT: vprolq $5, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; SKX-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; SKX-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> %x1, i8 %x2)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.prolv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> zeroinitializer, i8 %x2)
@@ -71,22 +71,22 @@ define <8 x i64>@test_splat_rol_v8i64(<8
define <16 x i32> @test_splat_ror_v16i32(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; KNL-LABEL: test_splat_ror_v16i32:
; KNL: # %bb.0:
+; KNL-NEXT: vprord $5, %zmm0, %zmm2
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprord $5, %zmm0, %zmm1 {%k1}
-; KNL-NEXT: vprord $5, %zmm0, %zmm2 {%k1} {z}
-; KNL-NEXT: vpaddd %zmm2, %zmm1, %zmm1
-; KNL-NEXT: vprord $5, %zmm0, %zmm0
+; KNL-NEXT: vprord $5, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; KNL-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_ror_v16i32:
; SKX: # %bb.0:
+; SKX-NEXT: vprord $5, %zmm0, %zmm2
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprord $5, %zmm0, %zmm1 {%k1}
-; SKX-NEXT: vprord $5, %zmm0, %zmm2 {%k1} {z}
-; SKX-NEXT: vpaddd %zmm2, %zmm1, %zmm1
-; SKX-NEXT: vprord $5, %zmm0, %zmm0
+; SKX-NEXT: vprord $5, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: vpaddd %zmm0, %zmm1, %zmm0
+; SKX-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; SKX-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> %x1, i16 %x2)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.prorv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>, <16 x i32> zeroinitializer, i16 %x2)
@@ -99,22 +99,22 @@ define <16 x i32> @test_splat_ror_v16i32
define <8 x i64>@test_splat_ror_v8i64(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; KNL-LABEL: test_splat_ror_v8i64:
; KNL: # %bb.0:
+; KNL-NEXT: vprorq $5, %zmm0, %zmm2
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vprorq $5, %zmm0, %zmm1 {%k1}
-; KNL-NEXT: vprorq $5, %zmm0, %zmm2 {%k1} {z}
-; KNL-NEXT: vpaddq %zmm2, %zmm1, %zmm1
-; KNL-NEXT: vprorq $5, %zmm0, %zmm0
+; KNL-NEXT: vprorq $5, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; KNL-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test_splat_ror_v8i64:
; SKX: # %bb.0:
+; SKX-NEXT: vprorq $5, %zmm0, %zmm2
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vprorq $5, %zmm0, %zmm1 {%k1}
-; SKX-NEXT: vprorq $5, %zmm0, %zmm2 {%k1} {z}
-; SKX-NEXT: vpaddq %zmm2, %zmm1, %zmm1
-; SKX-NEXT: vprorq $5, %zmm0, %zmm0
+; SKX-NEXT: vprorq $5, %zmm0, %zmm0 {%k1} {z}
; SKX-NEXT: vpaddq %zmm0, %zmm1, %zmm0
+; SKX-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; SKX-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> %x1, i8 %x2)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.prorv.q.512(<8 x i64> %x0, <8 x i64> <i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5, i64 5>, <8 x i64> zeroinitializer, i8 %x2)
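
These splat-rotate tests intentionally keep calling the legacy llvm.x86.avx512.mask.prolv/prorv intrinsics with a constant splat amount; after the auto-upgrade to the unmasked intrinsic plus select, the splat constant folds into the immediate encodings (vprold/vprorq/vprord/vprorq $5) that the updated CHECK lines verify. A minimal sketch of the post-upgrade unmasked equivalent, with a hypothetical function name:

define <16 x i32> @splat_rol_sketch(<16 x i32> %x0) {
  ; a constant splat amount should select the immediate form: vprold $5, %zmm0, %zmm0
  %r = call <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32> %x0, <16 x i32> <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>)
  ret <16 x i32> %r
}

declare <16 x i32> @llvm.x86.avx512.prolv.d.512(<16 x i32>, <16 x i32>)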
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll Fri Jun 29 18:32:04 2018
@@ -6549,6 +6549,914 @@ entry:
declare <8 x float> @llvm.sqrt.v8f32(<8 x float>)
+define <2 x i64> @test_mm_rol_epi32(<2 x i64> %__A) {
+; CHECK-LABEL: test_mm_rol_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprold $5, %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %0, i32 5)
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+declare <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32>, i32)
+
+define <2 x i64> @test_mm_mask_rol_epi32(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A) {
+; X86-LABEL: test_mm_mask_rol_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprold $5, %xmm1, %xmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_rol_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprold $5, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %0, i32 5)
+ %2 = bitcast <2 x i64> %__W to <4 x i32>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract, <4 x i32> %1, <4 x i32> %2
+ %5 = bitcast <4 x i32> %4 to <2 x i64>
+ ret <2 x i64> %5
+}
+
+define <2 x i64> @test_mm_maskz_rol_epi32(i8 zeroext %__U, <2 x i64> %__A) {
+; X86-LABEL: test_mm_maskz_rol_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprold $5, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_rol_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprold $5, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %0, i32 5)
+ %2 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract, <4 x i32> %1, <4 x i32> zeroinitializer
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <4 x i64> @test_mm256_rol_epi32(<4 x i64> %__A) {
+; CHECK-LABEL: test_mm256_rol_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprold $5, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %0, i32 5)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+declare <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32>, i32)
+
+define <4 x i64> @test_mm256_mask_rol_epi32(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A) {
+; X86-LABEL: test_mm256_mask_rol_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprold $5, %ymm1, %ymm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_rol_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprold $5, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %0, i32 5)
+ %2 = bitcast <4 x i64> %__W to <8 x i32>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i32> %1, <8 x i32> %2
+ %5 = bitcast <8 x i32> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+
+define <4 x i64> @test_mm256_maskz_rol_epi32(i8 zeroext %__U, <4 x i64> %__A) {
+; X86-LABEL: test_mm256_maskz_rol_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprold $5, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_rol_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprold $5, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %0, i32 5)
+ %2 = bitcast i8 %__U to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> zeroinitializer
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <2 x i64> @test_mm_rol_epi64(<2 x i64> %__A) {
+; CHECK-LABEL: test_mm_rol_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprolq $5, %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %__A, i32 5)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64>, i32)
+
+define <2 x i64> @test_mm_mask_rol_epi64(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A) {
+; X86-LABEL: test_mm_mask_rol_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolq $5, %xmm1, %xmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_rol_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolq $5, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__W
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_maskz_rol_epi64(i8 zeroext %__U, <2 x i64> %__A) {
+; X86-LABEL: test_mm_maskz_rol_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolq $5, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_rol_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolq $5, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
+ ret <2 x i64> %2
+}
+
+define <4 x i64> @test_mm256_rol_epi64(<4 x i64> %__A) {
+; CHECK-LABEL: test_mm256_rol_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprolq $5, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %__A, i32 5)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64>, i32)
+
+define <4 x i64> @test_mm256_mask_rol_epi64(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A) {
+; X86-LABEL: test_mm256_mask_rol_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolq $5, %ymm1, %ymm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_rol_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolq $5, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__W
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_maskz_rol_epi64(i8 zeroext %__U, <4 x i64> %__A) {
+; X86-LABEL: test_mm256_maskz_rol_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolq $5, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_rol_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolq $5, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
+ ret <4 x i64> %2
+}
+
+define <2 x i64> @test_mm_rolv_epi32(<2 x i64> %__A, <2 x i64> %__B) {
+; CHECK-LABEL: test_mm_rolv_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprolvd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %0, <4 x i32> %1)
+ %3 = bitcast <4 x i32> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_mask_rolv_epi32(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X86-LABEL: test_mm_mask_rolv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvd %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_rolv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvd %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %0, <4 x i32> %1)
+ %3 = bitcast <2 x i64> %__W to <4 x i32>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x i32> %2, <4 x i32> %3
+ %6 = bitcast <4 x i32> %5 to <2 x i64>
+ ret <2 x i64> %6
+}
+
+define <2 x i64> @test_mm_maskz_rolv_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X86-LABEL: test_mm_maskz_rolv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_rolv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %0, <4 x i32> %1)
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract.i, <4 x i32> %2, <4 x i32> zeroinitializer
+ %5 = bitcast <4 x i32> %4 to <2 x i64>
+ ret <2 x i64> %5
+}
+
+define <4 x i64> @test_mm256_rolv_epi32(<4 x i64> %__A, <4 x i64> %__B) {
+; CHECK-LABEL: test_mm256_rolv_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprolvd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %0, <8 x i32> %1)
+ %3 = bitcast <8 x i32> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_mask_rolv_epi32(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X86-LABEL: test_mm256_mask_rolv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvd %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_rolv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvd %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %0, <8 x i32> %1)
+ %3 = bitcast <4 x i64> %__W to <8 x i32>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
+ %6 = bitcast <8 x i32> %5 to <4 x i64>
+ ret <4 x i64> %6
+}
+
+define <4 x i64> @test_mm256_maskz_rolv_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X86-LABEL: test_mm256_maskz_rolv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_rolv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %0, <8 x i32> %1)
+ %3 = bitcast i8 %__U to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
+ %5 = bitcast <8 x i32> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+
+define <2 x i64> @test_mm_rolv_epi64(<2 x i64> %__A, <2 x i64> %__B) {
+; CHECK-LABEL: test_mm_rolv_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprolvq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+ ret <2 x i64> %0
+}
+
+define <2 x i64> @test_mm_mask_rolv_epi64(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X86-LABEL: test_mm_mask_rolv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvq %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_rolv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvq %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__W
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_maskz_rolv_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X86-LABEL: test_mm_maskz_rolv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_rolv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> zeroinitializer
+ ret <2 x i64> %2
+}
+
+define <4 x i64> @test_mm256_rolv_epi64(<4 x i64> %__A, <4 x i64> %__B) {
+; CHECK-LABEL: test_mm256_rolv_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprolvq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+ ret <4 x i64> %0
+}
+
+define <4 x i64> @test_mm256_mask_rolv_epi64(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X86-LABEL: test_mm256_mask_rolv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvq %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_rolv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvq %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__W
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_maskz_rolv_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X86-LABEL: test_mm256_maskz_rolv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_rolv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> zeroinitializer
+ ret <4 x i64> %2
+}
+
+define <2 x i64> @test_mm_ror_epi32(<2 x i64> %__A) {
+; CHECK-LABEL: test_mm_ror_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprord $5, %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %0, i32 5)
+ %2 = bitcast <4 x i32> %1 to <2 x i64>
+ ret <2 x i64> %2
+}
+
+declare <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32>, i32)
+
+define <2 x i64> @test_mm_mask_ror_epi32(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A) {
+; X86-LABEL: test_mm_mask_ror_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprord $5, %xmm1, %xmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_ror_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprord $5, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %0, i32 5)
+ %2 = bitcast <2 x i64> %__W to <4 x i32>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract, <4 x i32> %1, <4 x i32> %2
+ %5 = bitcast <4 x i32> %4 to <2 x i64>
+ ret <2 x i64> %5
+}
+
+define <2 x i64> @test_mm_maskz_ror_epi32(i8 zeroext %__U, <2 x i64> %__A) {
+; X86-LABEL: test_mm_maskz_ror_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprord $5, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_ror_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprord $5, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = tail call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %0, i32 5)
+ %2 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %2, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract, <4 x i32> %1, <4 x i32> zeroinitializer
+ %4 = bitcast <4 x i32> %3 to <2 x i64>
+ ret <2 x i64> %4
+}
+
+define <4 x i64> @test_mm256_ror_epi32(<4 x i64> %__A) {
+; CHECK-LABEL: test_mm256_ror_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprord $5, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %0, i32 5)
+ %2 = bitcast <8 x i32> %1 to <4 x i64>
+ ret <4 x i64> %2
+}
+
+declare <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32>, i32)
+
+define <4 x i64> @test_mm256_mask_ror_epi32(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A) {
+; X86-LABEL: test_mm256_mask_ror_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprord $5, %ymm1, %ymm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_ror_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprord $5, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %0, i32 5)
+ %2 = bitcast <4 x i64> %__W to <8 x i32>
+ %3 = bitcast i8 %__U to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i32> %1, <8 x i32> %2
+ %5 = bitcast <8 x i32> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+
+define <4 x i64> @test_mm256_maskz_ror_epi32(i8 zeroext %__U, <4 x i64> %__A) {
+; X86-LABEL: test_mm256_maskz_ror_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprord $5, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_ror_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprord $5, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = tail call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %0, i32 5)
+ %2 = bitcast i8 %__U to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> zeroinitializer
+ %4 = bitcast <8 x i32> %3 to <4 x i64>
+ ret <4 x i64> %4
+}
+
+define <2 x i64> @test_mm_ror_epi64(<2 x i64> %__A) {
+; CHECK-LABEL: test_mm_ror_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprorq $5, %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %__A, i32 5)
+ ret <2 x i64> %0
+}
+
+declare <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64>, i32)
+
+define <2 x i64> @test_mm_mask_ror_epi64(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A) {
+; X86-LABEL: test_mm_mask_ror_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorq $5, %xmm1, %xmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_ror_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorq $5, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> %__W
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_maskz_ror_epi64(i8 zeroext %__U, <2 x i64> %__A) {
+; X86-LABEL: test_mm_maskz_ror_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorq $5, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_ror_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorq $5, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract, <2 x i64> %0, <2 x i64> zeroinitializer
+ ret <2 x i64> %2
+}
+
+define <4 x i64> @test_mm256_ror_epi64(<4 x i64> %__A) {
+; CHECK-LABEL: test_mm256_ror_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprorq $5, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %__A, i32 5)
+ ret <4 x i64> %0
+}
+
+declare <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64>, i32)
+
+define <4 x i64> @test_mm256_mask_ror_epi64(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A) {
+; X86-LABEL: test_mm256_mask_ror_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorq $5, %ymm1, %ymm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_ror_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorq $5, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> %__W
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_maskz_ror_epi64(i8 zeroext %__U, <4 x i64> %__A) {
+; X86-LABEL: test_mm256_maskz_ror_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorq $5, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_ror_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorq $5, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %__A, i32 5)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract, <4 x i64> %0, <4 x i64> zeroinitializer
+ ret <4 x i64> %2
+}
+
+define <2 x i64> @test_mm_rorv_epi32(<2 x i64> %__A, <2 x i64> %__B) {
+; CHECK-LABEL: test_mm_rorv_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprorvd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %0, <4 x i32> %1)
+ %3 = bitcast <4 x i32> %2 to <2 x i64>
+ ret <2 x i64> %3
+}
+
+define <2 x i64> @test_mm_mask_rorv_epi32(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X86-LABEL: test_mm_mask_rorv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvd %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_rorv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvd %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %0, <4 x i32> %1)
+ %3 = bitcast <2 x i64> %__W to <4 x i32>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %5 = select <4 x i1> %extract.i, <4 x i32> %2, <4 x i32> %3
+ %6 = bitcast <4 x i32> %5 to <2 x i64>
+ ret <2 x i64> %6
+}
+
+define <2 x i64> @test_mm_maskz_rorv_epi32(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X86-LABEL: test_mm_maskz_rorv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_rorv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__A to <4 x i32>
+ %1 = bitcast <2 x i64> %__B to <4 x i32>
+ %2 = tail call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %0, <4 x i32> %1)
+ %3 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = select <4 x i1> %extract.i, <4 x i32> %2, <4 x i32> zeroinitializer
+ %5 = bitcast <4 x i32> %4 to <2 x i64>
+ ret <2 x i64> %5
+}
+
+define <4 x i64> @test_mm256_rorv_epi32(<4 x i64> %__A, <4 x i64> %__B) {
+; CHECK-LABEL: test_mm256_rorv_epi32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprorvd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %0, <8 x i32> %1)
+ %3 = bitcast <8 x i32> %2 to <4 x i64>
+ ret <4 x i64> %3
+}
+
+define <4 x i64> @test_mm256_mask_rorv_epi32(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X86-LABEL: test_mm256_mask_rorv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvd %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_rorv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvd %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %0, <8 x i32> %1)
+ %3 = bitcast <4 x i64> %__W to <8 x i32>
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = select <8 x i1> %4, <8 x i32> %2, <8 x i32> %3
+ %6 = bitcast <8 x i32> %5 to <4 x i64>
+ ret <4 x i64> %6
+}
+
+define <4 x i64> @test_mm256_maskz_rorv_epi32(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X86-LABEL: test_mm256_maskz_rorv_epi32:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_rorv_epi32:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__A to <8 x i32>
+ %1 = bitcast <4 x i64> %__B to <8 x i32>
+ %2 = tail call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %0, <8 x i32> %1)
+ %3 = bitcast i8 %__U to <8 x i1>
+ %4 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> zeroinitializer
+ %5 = bitcast <8 x i32> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+
+define <2 x i64> @test_mm_rorv_epi64(<2 x i64> %__A, <2 x i64> %__B) {
+; CHECK-LABEL: test_mm_rorv_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprorvq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+ ret <2 x i64> %0
+}
+
+define <2 x i64> @test_mm_mask_rorv_epi64(<2 x i64> %__W, i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X86-LABEL: test_mm_mask_rorv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvq %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_mask_rorv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvq %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> %__W
+ ret <2 x i64> %2
+}
+
+define <2 x i64> @test_mm_maskz_rorv_epi64(i8 zeroext %__U, <2 x i64> %__A, <2 x i64> %__B) {
+; X86-LABEL: test_mm_maskz_rorv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm_maskz_rorv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %__A, <2 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %2 = select <2 x i1> %extract.i, <2 x i64> %0, <2 x i64> zeroinitializer
+ ret <2 x i64> %2
+}
+
+define <4 x i64> @test_mm256_rorv_epi64(<4 x i64> %__A, <4 x i64> %__B) {
+; CHECK-LABEL: test_mm256_rorv_epi64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vprorvq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+ ret <4 x i64> %0
+}
+
+define <4 x i64> @test_mm256_mask_rorv_epi64(<4 x i64> %__W, i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X86-LABEL: test_mm256_mask_rorv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvq %ymm2, %ymm1, %ymm0 {%k1}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_mask_rorv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvq %ymm2, %ymm1, %ymm0 {%k1}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> %__W
+ ret <4 x i64> %2
+}
+
+define <4 x i64> @test_mm256_maskz_rorv_epi64(i8 zeroext %__U, <4 x i64> %__A, <4 x i64> %__B) {
+; X86-LABEL: test_mm256_maskz_rorv_epi64:
+; X86: # %bb.0: # %entry
+; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: kmovw %eax, %k1
+; X86-NEXT: vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X86-NEXT: retl
+;
+; X64-LABEL: test_mm256_maskz_rorv_epi64:
+; X64: # %bb.0: # %entry
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z}
+; X64-NEXT: retq
+entry:
+ %0 = tail call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %__A, <4 x i64> %__B)
+ %1 = bitcast i8 %__U to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %2 = select <4 x i1> %extract.i, <4 x i64> %0, <4 x i64> zeroinitializer
+ ret <4 x i64> %2
+}
+
declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>)
declare <8 x float> @llvm.x86.avx.cvtdq2.ps.256(<8 x i32>)
declare <4 x i32> @llvm.x86.avx512.mask.cvtpd2dq.128(<2 x double>, <4 x i32>, i8)
@@ -6594,5 +7502,13 @@ declare void @llvm.masked.compressstore.
declare void @llvm.masked.compressstore.v8f32(<8 x float>, float*, <8 x i1>)
declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32*, <4 x i1>)
declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32*, <8 x i1>)
+declare <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32>, <8 x i32>)
+declare <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64>, <4 x i64>)
+declare <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32>, <4 x i32>)
+declare <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32>, <8 x i32>)
+declare <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64>, <2 x i64>)
+declare <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64>, <4 x i64>)
!0 = !{i32 1}
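
One detail worth calling out in the fast-isel tests above: for the 128- and 256-bit element counts smaller than eight, the i8 mask is first bitcast to <8 x i1> and only the low lanes are extracted with a shufflevector before the select, since only that many mask bits apply. A minimal sketch of the v2i64 merge-masking pattern, with hypothetical value names:

define <2 x i64> @mask_extract_sketch(<2 x i64> %rot, <2 x i64> %passthru, i8 %mask) {
  ; widen the i8 mask to <8 x i1>, then take the low two lanes for a v2i64 op
  %m8 = bitcast i8 %mask to <8 x i1>
  %m2 = shufflevector <8 x i1> %m8, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
  %res = select <2 x i1> %m2, <2 x i64> %rot, <2 x i64> %passthru
  ret <2 x i64> %res
}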
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll Fri Jun 29 18:32:04 2018
@@ -12107,3 +12107,499 @@ define <8 x float> @test_sqrt_ps_256(<8
}
declare <8 x float> @llvm.x86.avx512.mask.sqrt.ps.256(<8 x float>, <8 x float>, i8) nounwind readnone
+
+declare <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+
+define <4 x i32>@test_int_x86_avx512_mask_prorv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prorv_d_128:
+; X86: # %bb.0:
+; X86-NEXT: vprorvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprorvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
+; X86-NEXT: vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x14,0xc1]
+; X86-NEXT: vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
+; X86-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prorv_d_128:
+; X64: # %bb.0:
+; X64-NEXT: vprorvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprorvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
+; X64-NEXT: vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x14,0xc1]
+; X64-NEXT: vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
+; X64-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
+ %res1 = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
+ %res3 = add <4 x i32> %res, %res1
+ %res4 = add <4 x i32> %res3, %res2
+ ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+
+define <8 x i32>@test_int_x86_avx512_mask_prorv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prorv_d_256:
+; X86: # %bb.0:
+; X86-NEXT: vprorvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprorvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
+; X86-NEXT: vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xc1]
+; X86-NEXT: vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
+; X86-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prorv_d_256:
+; X64: # %bb.0:
+; X64-NEXT: vprorvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprorvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
+; X64-NEXT: vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xc1]
+; X64-NEXT: vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
+; X64-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
+ %res1 = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
+ %res3 = add <8 x i32> %res, %res1
+ %res4 = add <8 x i32> %res3, %res2
+ ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_mask_prorv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prorv_q_128:
+; X86: # %bb.0:
+; X86-NEXT: vprorvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprorvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
+; X86-NEXT: vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x14,0xc1]
+; X86-NEXT: vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
+; X86-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prorv_q_128:
+; X64: # %bb.0:
+; X64-NEXT: vprorvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprorvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
+; X64-NEXT: vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x14,0xc1]
+; X64-NEXT: vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
+; X64-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
+ %res1 = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
+ %res3 = add <2 x i64> %res, %res1
+ %res4 = add <2 x i64> %res3, %res2
+ ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_mask_prorv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prorv_q_256:
+; X86: # %bb.0:
+; X86-NEXT: vprorvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprorvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
+; X86-NEXT: vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xc1]
+; X86-NEXT: vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
+; X86-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prorv_q_256:
+; X64: # %bb.0:
+; X64-NEXT: vprorvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprorvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
+; X64-NEXT: vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xc1]
+; X64-NEXT: vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
+; X64-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
+ %res1 = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
+ %res3 = add <4 x i64> %res, %res1
+ %res4 = add <4 x i64> %res3, %res2
+ ret <4 x i64> %res4
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32>, i32, <4 x i32>, i8)
+
+define <4 x i32>@test_int_x86_avx512_mask_prol_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prol_d_128:
+; X86: # %bb.0:
+; X86-NEXT: vprold $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc8,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprold $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
+; X86-NEXT: vprold $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc8,0x03]
+; X86-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
+; X86-NEXT: vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prol_d_128:
+; X64: # %bb.0:
+; X64-NEXT: vprold $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc8,0x03]
+; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprold $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
+; X64-NEXT: vprold $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc8,0x03]
+; X64-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
+; X64-NEXT: vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
+ %res1 = call <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32> %x0, i32 3, <4 x i32> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 -1)
+ %res3 = add <4 x i32> %res, %res1
+ %res4 = add <4 x i32> %res3, %res2
+ ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32>, i32, <8 x i32>, i8)
+
+define <8 x i32>@test_int_x86_avx512_mask_prol_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prol_d_256:
+; X86: # %bb.0:
+; X86-NEXT: vprold $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc8,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprold $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
+; X86-NEXT: vprold $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc8,0x03]
+; X86-NEXT: vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; X86-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prol_d_256:
+; X64: # %bb.0:
+; X64-NEXT: vprold $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc8,0x03]
+; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprold $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
+; X64-NEXT: vprold $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc8,0x03]
+; X64-NEXT: vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; X64-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
+ %res1 = call <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32> %x0, i32 3, <8 x i32> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 -1)
+ %res3 = add <8 x i32> %res, %res1
+ %res4 = add <8 x i32> %res3, %res2
+ ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64>, i32, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_mask_prol_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prol_q_128:
+; X86: # %bb.0:
+; X86-NEXT: vprolq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc8,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprolq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
+; X86-NEXT: vprolq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc8,0x03]
+; X86-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
+; X86-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prol_q_128:
+; X64: # %bb.0:
+; X64-NEXT: vprolq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc8,0x03]
+; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprolq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
+; X64-NEXT: vprolq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc8,0x03]
+; X64-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
+; X64-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
+ %res1 = call <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64> %x0, i32 3, <2 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 -1)
+ %res3 = add <2 x i64> %res, %res1
+ %res4 = add <2 x i64> %res3, %res2
+ ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64>, i32, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_mask_prol_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prol_q_256:
+; X86: # %bb.0:
+; X86-NEXT: vprolq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc8,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprolq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
+; X86-NEXT: vprolq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc8,0x03]
+; X86-NEXT: vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
+; X86-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prol_q_256:
+; X64: # %bb.0:
+; X64-NEXT: vprolq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc8,0x03]
+; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprolq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
+; X64-NEXT: vprolq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc8,0x03]
+; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
+; X64-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
+ %res1 = call <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64> %x0, i32 3, <4 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 -1)
+ %res3 = add <4 x i64> %res, %res1
+ %res4 = add <4 x i64> %res3, %res2
+ ret <4 x i64> %res4
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+
+define <4 x i32>@test_int_x86_avx512_mask_prolv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prolv_d_128:
+; X86: # %bb.0:
+; X86-NEXT: vprolvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprolvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
+; X86-NEXT: vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x15,0xc1]
+; X86-NEXT: vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
+; X86-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prolv_d_128:
+; X64: # %bb.0:
+; X64-NEXT: vprolvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprolvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
+; X64-NEXT: vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x15,0xc1]
+; X64-NEXT: vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
+; X64-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
+ %res1 = call <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
+ %res3 = add <4 x i32> %res, %res1
+ %res4 = add <4 x i32> %res3, %res2
+ ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+
+define <8 x i32>@test_int_x86_avx512_mask_prolv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prolv_d_256:
+; X86: # %bb.0:
+; X86-NEXT: vprolvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprolvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
+; X86-NEXT: vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xc1]
+; X86-NEXT: vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
+; X86-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prolv_d_256:
+; X64: # %bb.0:
+; X64-NEXT: vprolvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprolvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
+; X64-NEXT: vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xc1]
+; X64-NEXT: vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
+; X64-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
+ %res1 = call <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
+ %res3 = add <8 x i32> %res, %res1
+ %res4 = add <8 x i32> %res3, %res2
+ ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_mask_prolv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prolv_q_128:
+; X86: # %bb.0:
+; X86-NEXT: vprolvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprolvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
+; X86-NEXT: vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x15,0xc1]
+; X86-NEXT: vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
+; X86-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prolv_q_128:
+; X64: # %bb.0:
+; X64-NEXT: vprolvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprolvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
+; X64-NEXT: vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x15,0xc1]
+; X64-NEXT: vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
+; X64-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
+ %res1 = call <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
+ %res3 = add <2 x i64> %res, %res1
+ %res4 = add <2 x i64> %res3, %res2
+ ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_mask_prolv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_prolv_q_256:
+; X86: # %bb.0:
+; X86-NEXT: vprolvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprolvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
+; X86-NEXT: vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xc1]
+; X86-NEXT: vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
+; X86-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_prolv_q_256:
+; X64: # %bb.0:
+; X64-NEXT: vprolvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
+; X64-NEXT: vprolvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
+; X64-NEXT: vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xc1]
+; X64-NEXT: vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
+; X64-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
+ %res1 = call <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
+ %res3 = add <4 x i64> %res, %res1
+ %res4 = add <4 x i64> %res3, %res2
+ ret <4 x i64> %res4
+}
+
+declare <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32>, i32, <4 x i32>, i8)
+
+define <4 x i32>@test_int_x86_avx512_mask_pror_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_pror_d_128:
+; X86: # %bb.0:
+; X86-NEXT: vprord $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc0,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprord $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
+; X86-NEXT: vprord $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc0,0x03]
+; X86-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
+; X86-NEXT: vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_pror_d_128:
+; X64: # %bb.0:
+; X64-NEXT: vprord $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc0,0x03]
+; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprord $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
+; X64-NEXT: vprord $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc0,0x03]
+; X64-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
+; X64-NEXT: vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
+ %res1 = call <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32> %x0, i32 3, <4 x i32> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 -1)
+ %res3 = add <4 x i32> %res, %res1
+ %res4 = add <4 x i32> %res3, %res2
+ ret <4 x i32> %res4
+}
+
+declare <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32>, i32, <8 x i32>, i8)
+
+define <8 x i32>@test_int_x86_avx512_mask_pror_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_pror_d_256:
+; X86: # %bb.0:
+; X86-NEXT: vprord $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc0,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprord $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
+; X86-NEXT: vprord $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc0,0x03]
+; X86-NEXT: vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; X86-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_pror_d_256:
+; X64: # %bb.0:
+; X64-NEXT: vprord $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc0,0x03]
+; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprord $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
+; X64-NEXT: vprord $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc0,0x03]
+; X64-NEXT: vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; X64-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
+ %res1 = call <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32> %x0, i32 3, <8 x i32> zeroinitializer, i8 %x3)
+ %res2 = call <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 -1)
+ %res3 = add <8 x i32> %res, %res1
+ %res4 = add <8 x i32> %res3, %res2
+ ret <8 x i32> %res4
+}
+
+declare <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64>, i32, <2 x i64>, i8)
+
+define <2 x i64>@test_int_x86_avx512_mask_pror_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_pror_q_128:
+; X86: # %bb.0:
+; X86-NEXT: vprorq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc0,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprorq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
+; X86-NEXT: vprorq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc0,0x03]
+; X86-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
+; X86-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_pror_q_128:
+; X64: # %bb.0:
+; X64-NEXT: vprorq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc0,0x03]
+; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprorq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
+; X64-NEXT: vprorq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc0,0x03]
+; X64-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
+; X64-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
+ %res1 = call <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64> %x0, i32 3, <2 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 -1)
+ %res3 = add <2 x i64> %res, %res1
+ %res4 = add <2 x i64> %res3, %res2
+ ret <2 x i64> %res4
+}
+
+declare <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64>, i32, <4 x i64>, i8)
+
+define <4 x i64>@test_int_x86_avx512_mask_pror_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
+; X86-LABEL: test_int_x86_avx512_mask_pror_q_256:
+; X86: # %bb.0:
+; X86-NEXT: vprorq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc0,0x03]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
+; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
+; X86-NEXT: vprorq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
+; X86-NEXT: vprorq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc0,0x03]
+; X86-NEXT: vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
+; X86-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx512_mask_pror_q_256:
+; X64: # %bb.0:
+; X64-NEXT: vprorq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc0,0x03]
+; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
+; X64-NEXT: vprorq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
+; X64-NEXT: vprorq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc0,0x03]
+; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
+; X64-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
+ %res1 = call <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64> %x0, i32 3, <4 x i64> zeroinitializer, i8 %x3)
+ %res2 = call <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 -1)
+ %res3 = add <4 x i64> %res, %res1
+ %res4 = add <4 x i64> %res3, %res2
+ ret <4 x i64> %res4
+}
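
For readers following the upgrade tests above: the legacy masked-rotate intrinsics are now rewritten by AutoUpgrade into the unmasked intrinsic plus a mask-driven select. A minimal sketch of the upgraded IR for one of the variable-count calls above, derived from the rewritten test IR in avx512vl-intrinsics.ll below (the wrapper function name is illustrative, not part of the commit):

define <4 x i64> @upgraded_masked_prorv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %m) {
  ; The unmasked rotate replaces the old masked intrinsic call.
  %rot = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
  ; The i8 mask is bitcast to <8 x i1>, and the low 4 lanes are extracted
  ; because the 256-bit qword op only has 4 elements.
  %mvec = bitcast i8 %m to <8 x i1>
  %mlo = shufflevector <8 x i1> %mvec, <8 x i1> %mvec, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ; Set lanes take the rotate result; clear lanes keep the passthru %x2
  ; (or zeroinitializer for the zero-masked form).
  %res = select <4 x i1> %mlo, <4 x i64> %rot, <4 x i64> %x2
  ret <4 x i64> %res
}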
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll?rev=336035&r1=336034&r2=336035&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll Fri Jun 29 18:32:04 2018
@@ -4725,499 +4725,587 @@ define <2 x double> @test_rcp_pd_128_rrk
declare <4 x double> @llvm.x86.avx512.rcp14.pd.256(<4 x double>, <4 x double>, i8) nounwind readnone
declare <2 x double> @llvm.x86.avx512.rcp14.pd.128(<2 x double>, <2 x double>, i8) nounwind readnone
-declare <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32>, <4 x i32>)
define <4 x i32>@test_int_x86_avx512_mask_prorv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prorv_d_128:
; X86: # %bb.0:
+; X86-NEXT: vprorvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprorvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
-; X86-NEXT: vprorvd %xmm1, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x14,0xd9]
-; X86-NEXT: vprorvd %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x14,0xc1]
-; X86-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X86-NEXT: vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x14,0xc1]
+; X86-NEXT: vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
; X86-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prorv_d_128:
; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprorvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprorvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x14,0xd1]
; X64-NEXT: vprorvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x14,0xc1]
; X64-NEXT: vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
; X64-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
- %res1 = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %x3)
- %res2 = call <4 x i32> @llvm.x86.avx512.mask.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
- %res3 = add <4 x i32> %res, %res1
- %res4 = add <4 x i32> %res3, %res2
+ %1 = call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
+ %4 = call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
+ %7 = call <4 x i32> @llvm.x86.avx512.prorv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+ %res3 = add <4 x i32> %3, %6
+ %res4 = add <4 x i32> %res3, %7
ret <4 x i32> %res4
}
-declare <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+declare <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32>, <8 x i32>)
define <8 x i32>@test_int_x86_avx512_mask_prorv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prorv_d_256:
; X86: # %bb.0:
+; X86-NEXT: vprorvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprorvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
-; X86-NEXT: vprorvd %ymm1, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xd9]
-; X86-NEXT: vprorvd %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf2,0x7d,0x28,0x14,0xc1]
-; X86-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
+; X86-NEXT: vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xc1]
+; X86-NEXT: vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
; X86-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prorv_d_256:
; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprorvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprorvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x14,0xd1]
; X64-NEXT: vprorvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x14,0xc1]
; X64-NEXT: vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
; X64-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
- %res1 = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> zeroinitializer, i8 %x3)
- %res2 = call <8 x i32> @llvm.x86.avx512.mask.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
- %res3 = add <8 x i32> %res, %res1
- %res4 = add <8 x i32> %res3, %res2
+ %1 = call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
+ %4 = call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
+ %7 = call <8 x i32> @llvm.x86.avx512.prorv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+ %res3 = add <8 x i32> %3, %6
+ %res4 = add <8 x i32> %res3, %7
ret <8 x i32> %res4
}
-declare <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64>, <2 x i64>)
define <2 x i64>@test_int_x86_avx512_mask_prorv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prorv_q_128:
; X86: # %bb.0:
+; X86-NEXT: vprorvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprorvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
-; X86-NEXT: vprorvq %xmm1, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x14,0xd9]
-; X86-NEXT: vprorvq %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x14,0xc1]
-; X86-NEXT: vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
+; X86-NEXT: vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x14,0xc1]
+; X86-NEXT: vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
; X86-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prorv_q_128:
; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprorvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprorvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x14,0xd1]
; X64-NEXT: vprorvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x14,0xc1]
; X64-NEXT: vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
; X64-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
- %res1 = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
- %res2 = call <2 x i64> @llvm.x86.avx512.mask.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
- %res3 = add <2 x i64> %res, %res1
- %res4 = add <2 x i64> %res3, %res2
+ %1 = call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+ %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
+ %4 = call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
+ %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
+ %7 = call <2 x i64> @llvm.x86.avx512.prorv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+ %res3 = add <2 x i64> %3, %6
+ %res4 = add <2 x i64> %res3, %7
ret <2 x i64> %res4
}
-declare <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64>, <4 x i64>)
define <4 x i64>@test_int_x86_avx512_mask_prorv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prorv_q_256:
; X86: # %bb.0:
+; X86-NEXT: vprorvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprorvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
-; X86-NEXT: vprorvq %ymm1, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xd9]
-; X86-NEXT: vprorvq %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x14,0xc1]
-; X86-NEXT: vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
+; X86-NEXT: vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xc1]
+; X86-NEXT: vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
; X86-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prorv_q_256:
; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprorvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x14,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprorvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x14,0xd1]
; X64-NEXT: vprorvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x14,0xc1]
; X64-NEXT: vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
; X64-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
- %res1 = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
- %res2 = call <4 x i64> @llvm.x86.avx512.mask.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
- %res3 = add <4 x i64> %res, %res1
- %res4 = add <4 x i64> %res3, %res2
+ %1 = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
+ %4 = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
+ %7 = call <4 x i64> @llvm.x86.avx512.prorv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+ %res3 = add <4 x i64> %3, %6
+ %res4 = add <4 x i64> %res3, %7
ret <4 x i64> %res4
}
-declare <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32>, i32, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32>, i32)
define <4 x i32>@test_int_x86_avx512_mask_prol_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prol_d_128:
; X86: # %bb.0:
+; X86-NEXT: vprold $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc8,0x03]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprold $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
-; X86-NEXT: vprold $3, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc8,0x03]
-; X86-NEXT: vprold $3, %xmm0, %xmm0 # encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc8,0x03]
-; X86-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X86-NEXT: vprold $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc8,0x03]
+; X86-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
; X86-NEXT: vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prol_d_128:
; X64: # %bb.0:
+; X64-NEXT: vprold $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc8,0x03]
; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; X64-NEXT: vprold $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
-; X64-NEXT: vprold $3, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc8,0x03]
-; X64-NEXT: vprold $3, %xmm0, %xmm0 # encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc8,0x03]
-; X64-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X64-NEXT: vprold $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc8,0x03]
+; X64-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
; X64-NEXT: vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
- %res1 = call <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32> %x0, i32 3, <4 x i32> zeroinitializer, i8 %x3)
- %res2 = call <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 -1)
- %res3 = add <4 x i32> %res, %res1
- %res4 = add <4 x i32> %res3, %res2
+ %1 = call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
+ %4 = call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
+ %7 = call <4 x i32> @llvm.x86.avx512.prol.d.128(<4 x i32> %x0, i32 3)
+ %res3 = add <4 x i32> %3, %6
+ %res4 = add <4 x i32> %res3, %7
ret <4 x i32> %res4
}
-declare <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32>, i32, <8 x i32>, i8)
+declare <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32>, i32)
define <8 x i32>@test_int_x86_avx512_mask_prol_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prol_d_256:
; X86: # %bb.0:
+; X86-NEXT: vprold $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc8,0x03]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprold $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
-; X86-NEXT: vprold $3, %ymm0, %ymm2 {%k1} {z} # encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc8,0x03]
-; X86-NEXT: vprold $3, %ymm0, %ymm0 # encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc8,0x03]
-; X86-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X86-NEXT: vprold $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc8,0x03]
+; X86-NEXT: vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
; X86-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prol_d_256:
; X64: # %bb.0:
+; X64-NEXT: vprold $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc8,0x03]
; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; X64-NEXT: vprold $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
-; X64-NEXT: vprold $3, %ymm0, %ymm2 {%k1} {z} # encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc8,0x03]
-; X64-NEXT: vprold $3, %ymm0, %ymm0 # encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc8,0x03]
-; X64-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X64-NEXT: vprold $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc8,0x03]
+; X64-NEXT: vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
; X64-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
- %res1 = call <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32> %x0, i32 3, <8 x i32> zeroinitializer, i8 %x3)
- %res2 = call <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 -1)
- %res3 = add <8 x i32> %res, %res1
- %res4 = add <8 x i32> %res3, %res2
+ %1 = call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
+ %4 = call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
+ %7 = call <8 x i32> @llvm.x86.avx512.prol.d.256(<8 x i32> %x0, i32 3)
+ %res3 = add <8 x i32> %3, %6
+ %res4 = add <8 x i32> %res3, %7
ret <8 x i32> %res4
}
-declare <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64>, i32, <2 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64>, i32)
define <2 x i64>@test_int_x86_avx512_mask_prol_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prol_q_128:
; X86: # %bb.0:
+; X86-NEXT: vprolq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc8,0x03]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprolq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
-; X86-NEXT: vprolq $3, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0xed,0x89,0x72,0xc8,0x03]
-; X86-NEXT: vprolq $3, %xmm0, %xmm0 # encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc8,0x03]
-; X86-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X86-NEXT: vprolq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc8,0x03]
+; X86-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
; X86-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prol_q_128:
; X64: # %bb.0:
+; X64-NEXT: vprolq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc8,0x03]
; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; X64-NEXT: vprolq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
-; X64-NEXT: vprolq $3, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0xed,0x89,0x72,0xc8,0x03]
-; X64-NEXT: vprolq $3, %xmm0, %xmm0 # encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc8,0x03]
-; X64-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X64-NEXT: vprolq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc8,0x03]
+; X64-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
; X64-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
- %res1 = call <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64> %x0, i32 3, <2 x i64> zeroinitializer, i8 %x3)
- %res2 = call <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 -1)
- %res3 = add <2 x i64> %res, %res1
- %res4 = add <2 x i64> %res3, %res2
+ %1 = call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+ %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
+ %4 = call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
+ %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
+ %7 = call <2 x i64> @llvm.x86.avx512.prol.q.128(<2 x i64> %x0, i32 3)
+ %res3 = add <2 x i64> %3, %6
+ %res4 = add <2 x i64> %res3, %7
ret <2 x i64> %res4
}
-declare <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64>, i32, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64>, i32)
define <4 x i64>@test_int_x86_avx512_mask_prol_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prol_q_256:
; X86: # %bb.0:
+; X86-NEXT: vprolq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc8,0x03]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprolq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
-; X86-NEXT: vprolq $3, %ymm0, %ymm2 {%k1} {z} # encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc8,0x03]
-; X86-NEXT: vprolq $3, %ymm0, %ymm0 # encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc8,0x03]
-; X86-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X86-NEXT: vprolq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc8,0x03]
+; X86-NEXT: vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
; X86-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prol_q_256:
; X64: # %bb.0:
+; X64-NEXT: vprolq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc8,0x03]
; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; X64-NEXT: vprolq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
-; X64-NEXT: vprolq $3, %ymm0, %ymm2 {%k1} {z} # encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc8,0x03]
-; X64-NEXT: vprolq $3, %ymm0, %ymm0 # encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc8,0x03]
-; X64-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X64-NEXT: vprolq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc8,0x03]
+; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
; X64-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
- %res1 = call <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64> %x0, i32 3, <4 x i64> zeroinitializer, i8 %x3)
- %res2 = call <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 -1)
- %res3 = add <4 x i64> %res, %res1
- %res4 = add <4 x i64> %res3, %res2
+ %1 = call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
+ %4 = call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
+ %7 = call <4 x i64> @llvm.x86.avx512.prol.q.256(<4 x i64> %x0, i32 3)
+ %res3 = add <4 x i64> %3, %6
+ %res4 = add <4 x i64> %res3, %7
ret <4 x i64> %res4
}
-declare <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32>, <4 x i32>)
define <4 x i32>@test_int_x86_avx512_mask_prolv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prolv_d_128:
; X86: # %bb.0:
+; X86-NEXT: vprolvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprolvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
-; X86-NEXT: vprolvd %xmm1, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x15,0xd9]
-; X86-NEXT: vprolvd %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf2,0x7d,0x08,0x15,0xc1]
-; X86-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
+; X86-NEXT: vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x15,0xc1]
+; X86-NEXT: vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
; X86-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prolv_d_128:
; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprolvd %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0x7d,0x08,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprolvd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0x15,0xd1]
; X64-NEXT: vprolvd %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0x15,0xc1]
; X64-NEXT: vpaddd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc3]
; X64-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
- %res1 = call <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> zeroinitializer, i8 %x3)
- %res2 = call <4 x i32> @llvm.x86.avx512.mask.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
- %res3 = add <4 x i32> %res, %res1
- %res4 = add <4 x i32> %res3, %res2
+ %1 = call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
+ %4 = call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
+ %7 = call <4 x i32> @llvm.x86.avx512.prolv.d.128(<4 x i32> %x0, <4 x i32> %x1)
+ %res3 = add <4 x i32> %3, %6
+ %res4 = add <4 x i32> %res3, %7
ret <4 x i32> %res4
}
-declare <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
+declare <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32>, <8 x i32>)
define <8 x i32>@test_int_x86_avx512_mask_prolv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prolv_d_256:
; X86: # %bb.0:
+; X86-NEXT: vprolvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprolvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
-; X86-NEXT: vprolvd %ymm1, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xd9]
-; X86-NEXT: vprolvd %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf2,0x7d,0x28,0x15,0xc1]
-; X86-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
+; X86-NEXT: vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xc1]
+; X86-NEXT: vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
; X86-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prolv_d_256:
; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprolvd %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0x7d,0x28,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprolvd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0x15,0xd1]
; X64-NEXT: vprolvd %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0x15,0xc1]
; X64-NEXT: vpaddd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc3]
; X64-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
- %res1 = call <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> zeroinitializer, i8 %x3)
- %res2 = call <8 x i32> @llvm.x86.avx512.mask.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 -1)
- %res3 = add <8 x i32> %res, %res1
- %res4 = add <8 x i32> %res3, %res2
+ %1 = call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
+ %4 = call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
+ %7 = call <8 x i32> @llvm.x86.avx512.prolv.d.256(<8 x i32> %x0, <8 x i32> %x1)
+ %res3 = add <8 x i32> %3, %6
+ %res4 = add <8 x i32> %res3, %7
ret <8 x i32> %res4
}
-declare <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64>, <2 x i64>)
define <2 x i64>@test_int_x86_avx512_mask_prolv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prolv_q_128:
; X86: # %bb.0:
+; X86-NEXT: vprolvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprolvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
-; X86-NEXT: vprolvq %xmm1, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x15,0xd9]
-; X86-NEXT: vprolvq %xmm1, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x15,0xc1]
-; X86-NEXT: vpaddq %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xd4,0xc0]
+; X86-NEXT: vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x15,0xc1]
+; X86-NEXT: vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
; X86-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prolv_q_128:
; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprolvq %xmm1, %xmm0, %xmm3 # encoding: [0x62,0xf2,0xfd,0x08,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprolvq %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0x15,0xd1]
; X64-NEXT: vprolvq %xmm1, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0x15,0xc1]
; X64-NEXT: vpaddq %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc3]
; X64-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
- %res1 = call <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> zeroinitializer, i8 %x3)
- %res2 = call <2 x i64> @llvm.x86.avx512.mask.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
- %res3 = add <2 x i64> %res, %res1
- %res4 = add <2 x i64> %res3, %res2
+ %1 = call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+ %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
+ %4 = call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
+ %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
+ %7 = call <2 x i64> @llvm.x86.avx512.prolv.q.128(<2 x i64> %x0, <2 x i64> %x1)
+ %res3 = add <2 x i64> %3, %6
+ %res4 = add <2 x i64> %res3, %7
ret <2 x i64> %res4
}
-declare <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64>, <4 x i64>)
define <4 x i64>@test_int_x86_avx512_mask_prolv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_prolv_q_256:
; X86: # %bb.0:
+; X86-NEXT: vprolvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprolvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
-; X86-NEXT: vprolvq %ymm1, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xd9]
-; X86-NEXT: vprolvq %ymm1, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x15,0xc1]
-; X86-NEXT: vpaddq %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xd4,0xc0]
+; X86-NEXT: vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xc1]
+; X86-NEXT: vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
; X86-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_prolv_q_256:
; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprolvq %ymm1, %ymm0, %ymm3 # encoding: [0x62,0xf2,0xfd,0x28,0x15,0xd9]
+; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; X64-NEXT: vprolvq %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0x15,0xd1]
; X64-NEXT: vprolvq %ymm1, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0x15,0xc1]
; X64-NEXT: vpaddq %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc3]
; X64-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
- %res1 = call <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> zeroinitializer, i8 %x3)
- %res2 = call <4 x i64> @llvm.x86.avx512.mask.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 -1)
- %res3 = add <4 x i64> %res, %res1
- %res4 = add <4 x i64> %res3, %res2
+ %1 = call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
+ %4 = call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
+ %7 = call <4 x i64> @llvm.x86.avx512.prolv.q.256(<4 x i64> %x0, <4 x i64> %x1)
+ %res3 = add <4 x i64> %3, %6
+ %res4 = add <4 x i64> %res3, %7
ret <4 x i64> %res4
}
-declare <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32>, i32, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32>, i32)
define <4 x i32>@test_int_x86_avx512_mask_pror_d_128(<4 x i32> %x0, i32 %x1, <4 x i32> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_pror_d_128:
; X86: # %bb.0:
+; X86-NEXT: vprord $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc0,0x03]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprord $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
-; X86-NEXT: vprord $3, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc0,0x03]
-; X86-NEXT: vprord $3, %xmm0, %xmm0 # encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc0,0x03]
-; X86-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X86-NEXT: vprord $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc0,0x03]
+; X86-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
; X86-NEXT: vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_pror_d_128:
; X64: # %bb.0:
+; X64-NEXT: vprord $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0x6d,0x08,0x72,0xc0,0x03]
; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; X64-NEXT: vprord $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
-; X64-NEXT: vprord $3, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc0,0x03]
-; X64-NEXT: vprord $3, %xmm0, %xmm0 # encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc0,0x03]
-; X64-NEXT: vpaddd %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; X64-NEXT: vprord $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0x89,0x72,0xc0,0x03]
+; X64-NEXT: vpaddd %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc2]
; X64-NEXT: vpaddd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
- %res1 = call <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32> %x0, i32 3, <4 x i32> zeroinitializer, i8 %x3)
- %res2 = call <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 -1)
- %res3 = add <4 x i32> %res, %res1
- %res4 = add <4 x i32> %res3, %res2
+ %1 = call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract1, <4 x i32> %1, <4 x i32> %x2
+ %4 = call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = select <4 x i1> %extract, <4 x i32> %4, <4 x i32> zeroinitializer
+ %7 = call <4 x i32> @llvm.x86.avx512.pror.d.128(<4 x i32> %x0, i32 3)
+ %res3 = add <4 x i32> %3, %6
+ %res4 = add <4 x i32> %res3, %7
ret <4 x i32> %res4
}
-declare <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32>, i32, <8 x i32>, i8)
+declare <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32>, i32)
define <8 x i32>@test_int_x86_avx512_mask_pror_d_256(<8 x i32> %x0, i32 %x1, <8 x i32> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_pror_d_256:
; X86: # %bb.0:
+; X86-NEXT: vprord $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc0,0x03]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprord $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
-; X86-NEXT: vprord $3, %ymm0, %ymm2 {%k1} {z} # encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc0,0x03]
-; X86-NEXT: vprord $3, %ymm0, %ymm0 # encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc0,0x03]
-; X86-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X86-NEXT: vprord $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc0,0x03]
+; X86-NEXT: vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
; X86-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_pror_d_256:
; X64: # %bb.0:
+; X64-NEXT: vprord $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0x6d,0x28,0x72,0xc0,0x03]
; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; X64-NEXT: vprord $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
-; X64-NEXT: vprord $3, %ymm0, %ymm2 {%k1} {z} # encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc0,0x03]
-; X64-NEXT: vprord $3, %ymm0, %ymm0 # encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc0,0x03]
-; X64-NEXT: vpaddd %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xc0]
+; X64-NEXT: vprord $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xc0,0x03]
+; X64-NEXT: vpaddd %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
; X64-NEXT: vpaddd %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
- %res1 = call <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32> %x0, i32 3, <8 x i32> zeroinitializer, i8 %x3)
- %res2 = call <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 -1)
- %res3 = add <8 x i32> %res, %res1
- %res4 = add <8 x i32> %res3, %res2
+ %1 = call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %3 = select <8 x i1> %2, <8 x i32> %1, <8 x i32> %x2
+ %4 = call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %6 = select <8 x i1> %5, <8 x i32> %4, <8 x i32> zeroinitializer
+ %7 = call <8 x i32> @llvm.x86.avx512.pror.d.256(<8 x i32> %x0, i32 3)
+ %res3 = add <8 x i32> %3, %6
+ %res4 = add <8 x i32> %res3, %7
ret <8 x i32> %res4
}
-declare <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64>, i32, <2 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64>, i32)
define <2 x i64>@test_int_x86_avx512_mask_pror_q_128(<2 x i64> %x0, i32 %x1, <2 x i64> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_pror_q_128:
; X86: # %bb.0:
+; X86-NEXT: vprorq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc0,0x03]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprorq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
-; X86-NEXT: vprorq $3, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0xed,0x89,0x72,0xc0,0x03]
-; X86-NEXT: vprorq $3, %xmm0, %xmm0 # encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc0,0x03]
-; X86-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X86-NEXT: vprorq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc0,0x03]
+; X86-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
; X86-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_pror_q_128:
; X64: # %bb.0:
+; X64-NEXT: vprorq $3, %xmm0, %xmm2 # encoding: [0x62,0xf1,0xed,0x08,0x72,0xc0,0x03]
; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; X64-NEXT: vprorq $3, %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
-; X64-NEXT: vprorq $3, %xmm0, %xmm2 {%k1} {z} # encoding: [0x62,0xf1,0xed,0x89,0x72,0xc0,0x03]
-; X64-NEXT: vprorq $3, %xmm0, %xmm0 # encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc0,0x03]
-; X64-NEXT: vpaddq %xmm0, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; X64-NEXT: vprorq $3, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0x89,0x72,0xc0,0x03]
+; X64-NEXT: vpaddq %xmm2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc2]
; X64-NEXT: vpaddq %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
- %res1 = call <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64> %x0, i32 3, <2 x i64> zeroinitializer, i8 %x3)
- %res2 = call <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 -1)
- %res3 = add <2 x i64> %res, %res1
- %res4 = add <2 x i64> %res3, %res2
+ %1 = call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <2 x i32> <i32 0, i32 1>
+ %3 = select <2 x i1> %extract1, <2 x i64> %1, <2 x i64> %x2
+ %4 = call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <2 x i32> <i32 0, i32 1>
+ %6 = select <2 x i1> %extract, <2 x i64> %4, <2 x i64> zeroinitializer
+ %7 = call <2 x i64> @llvm.x86.avx512.pror.q.128(<2 x i64> %x0, i32 3)
+ %res3 = add <2 x i64> %3, %6
+ %res4 = add <2 x i64> %res3, %7
ret <2 x i64> %res4
}
-declare <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64>, i32, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64>, i32)
define <4 x i64>@test_int_x86_avx512_mask_pror_q_256(<4 x i64> %x0, i32 %x1, <4 x i64> %x2, i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_pror_q_256:
; X86: # %bb.0:
+; X86-NEXT: vprorq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc0,0x03]
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x08]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
; X86-NEXT: vprorq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
-; X86-NEXT: vprorq $3, %ymm0, %ymm2 {%k1} {z} # encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc0,0x03]
-; X86-NEXT: vprorq $3, %ymm0, %ymm0 # encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc0,0x03]
-; X86-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X86-NEXT: vprorq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc0,0x03]
+; X86-NEXT: vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
; X86-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_pror_q_256:
; X64: # %bb.0:
+; X64-NEXT: vprorq $3, %ymm0, %ymm2 # encoding: [0x62,0xf1,0xed,0x28,0x72,0xc0,0x03]
; X64-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; X64-NEXT: vprorq $3, %ymm0, %ymm1 {%k1} # encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
-; X64-NEXT: vprorq $3, %ymm0, %ymm2 {%k1} {z} # encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc0,0x03]
-; X64-NEXT: vprorq $3, %ymm0, %ymm0 # encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc0,0x03]
-; X64-NEXT: vpaddq %ymm0, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; X64-NEXT: vprorq $3, %ymm0, %ymm0 {%k1} {z} # encoding: [0x62,0xf1,0xfd,0xa9,0x72,0xc0,0x03]
+; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc2]
; X64-NEXT: vpaddq %ymm0, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
- %res1 = call <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64> %x0, i32 3, <4 x i64> zeroinitializer, i8 %x3)
- %res2 = call <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 -1)
- %res3 = add <4 x i64> %res, %res1
- %res4 = add <4 x i64> %res3, %res2
+ %1 = call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %x0, i32 3)
+ %2 = bitcast i8 %x3 to <8 x i1>
+ %extract1 = shufflevector <8 x i1> %2, <8 x i1> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %3 = select <4 x i1> %extract1, <4 x i64> %1, <4 x i64> %x2
+ %4 = call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %x0, i32 3)
+ %5 = bitcast i8 %x3 to <8 x i1>
+ %extract = shufflevector <8 x i1> %5, <8 x i1> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = select <4 x i1> %extract, <4 x i64> %4, <4 x i64> zeroinitializer
+ %7 = call <4 x i64> @llvm.x86.avx512.pror.q.256(<4 x i64> %x0, i32 3)
+ %res3 = add <4 x i64> %3, %6
+ %res4 = add <4 x i64> %res3, %7
ret <4 x i64> %res4
}
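
Every hunk above follows the same shape: the old masked intrinsic carried the passthru vector and the i8 mask as extra operands; the upgraded IR calls the unmasked intrinsic, bitcasts the i8 mask to <8 x i1>, shuffles out the low lanes when the result vector has fewer than eight elements, and selects between the rotate result and the passthru. Below is a minimal C++ sketch of that rewrite, not the verbatim AutoUpgrade.cpp code: the helper names maskToBoolVec and upgradeMaskedRotate are illustrative, and the usual fast path that skips the select for an all-ones mask is omitted.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
using namespace llvm;

static Value *maskToBoolVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  // i8 mask -> <8 x i1>, then keep only the low NumElts lanes
  // (cf. the bitcast + shufflevector pairs in the 128/256-bit tests).
  unsigned MaskBits = Mask->getType()->getIntegerBitWidth();
  Value *Vec = Builder.CreateBitCast(
      Mask, VectorType::get(Builder.getInt1Ty(), MaskBits));
  if (NumElts < MaskBits) {
    SmallVector<uint32_t, 8> Idxs;
    for (unsigned I = 0; I != NumElts; ++I)
      Idxs.push_back(I);
    Vec = Builder.CreateShuffleVector(Vec, Vec, Idxs);
  }
  return Vec;
}

static Value *upgradeMaskedRotate(IRBuilder<> &Builder, CallInst &CI,
                                  Intrinsic::ID NewID) {
  // Emit the unmasked rotate first, then blend with the passthru
  // operand under the mask.
  Value *Rot = Builder.CreateCall(
      Intrinsic::getDeclaration(CI.getModule(), NewID),
      {CI.getArgOperand(0), CI.getArgOperand(1)});
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = maskToBoolVec(Builder, CI.getArgOperand(3), NumElts);
  return Builder.CreateSelect(Mask, Rot, CI.getArgOperand(2));
}

The zeroing variants fall out of the same code with a zeroinitializer passthru, and the immediate-count rotates (prol/pror with an i32 amount) differ only in that the second call operand is a scalar rather than a vector.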