[llvm] [X86] Generate `kmov` for masking integers (PR #120593)
Abhishek Kaushik via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 25 01:51:49 PST 2025
https://github.com/abhishek-kaushik22 updated https://github.com/llvm/llvm-project/pull/120593
From 822ae48049fdebc769269291868270314f30ca9a Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Thu, 19 Dec 2024 21:14:10 +0530
Subject: [PATCH 01/17] Generate `kmov` for masking integers
When an integer is used as a bit mask, the LLVM IR looks something like this:
```
%1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
%cmp1 = icmp ne <16 x i32> %1, zeroinitializer
```
where `.splat` is a vector containing the mask in all lanes.
The assembly generated for this looks like:
```
vpbroadcastd %ecx, %zmm0
vptestmd .LCPI0_0(%rip), %zmm0, %k1
```
where we have a constant-pool table of powers of 2.
Instead of doing this, we can move the relevant bits directly into a `k` register using a `kmov` instruction. This is faster and also reduces code size.
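For the 16-lane example above, the patch instead produces (as checked in the new `llvm/test/CodeGen/X86/kmov.ll` tests; register assignment may vary):
```
kmovw %ecx, %k1
```
and for the inverted (`icmp eq`) form, a `kmov` followed by a `knot`:
```
kmovw %ecx, %k0
knotw %k0, %k1
```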
---
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 79 +++++++--
llvm/test/CodeGen/X86/kmov.ll | 205 ++++++++++++++++++++++++
llvm/test/CodeGen/X86/pr78897.ll | 4 +-
3 files changed, 273 insertions(+), 15 deletions(-)
create mode 100644 llvm/test/CodeGen/X86/kmov.ll
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index bb20e6ecf281b..8c199a30dfbce 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -592,7 +592,7 @@ namespace {
bool matchVPTERNLOG(SDNode *Root, SDNode *ParentA, SDNode *ParentB,
SDNode *ParentC, SDValue A, SDValue B, SDValue C,
uint8_t Imm);
- bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
+ bool tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc, SDValue Mask);
bool tryMatchBitSelect(SDNode *N);
MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
@@ -4897,10 +4897,10 @@ VPTESTM_CASE(v32i16, WZ##SUFFIX)
#undef VPTESTM_CASE
}
-// Try to create VPTESTM instruction. If InMask is not null, it will be used
-// to form a masked operation.
-bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
- SDValue InMask) {
+// Try to create VPTESTM or KMOV instruction. If InMask is not null, it will be
+// used to form a masked operation.
+bool X86DAGToDAGISel::tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc,
+ SDValue InMask) {
assert(Subtarget->hasAVX512() && "Expected AVX512!");
assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
"Unexpected VT!");
@@ -4975,12 +4975,70 @@ bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
};
+ auto canUseKMOV = [&]() {
+ if (Src0.getOpcode() != X86ISD::VBROADCAST)
+ return false;
+
+ if (Src1.getOpcode() != ISD::LOAD ||
+ Src1.getOperand(1).getOpcode() != X86ISD::Wrapper ||
+ Src1.getOperand(1).getOperand(0).getOpcode() != ISD::TargetConstantPool)
+ return false;
+
+ const auto *ConstPool =
+ dyn_cast<ConstantPoolSDNode>(Src1.getOperand(1).getOperand(0));
+ if (!ConstPool)
+ return false;
+
+ const auto *ConstVec = ConstPool->getConstVal();
+ const auto *ConstVecType = dyn_cast<FixedVectorType>(ConstVec->getType());
+ if (!ConstVecType)
+ return false;
+
+ for (unsigned i = 0, e = ConstVecType->getNumElements(), k = 1; i != e;
+ ++i, k *= 2) {
+ const auto *Element = ConstVec->getAggregateElement(i);
+ if (llvm::isa<llvm::UndefValue>(Element)) {
+ for (unsigned j = i + 1; j != e; ++j) {
+ if (!llvm::isa<llvm::UndefValue>(ConstVec->getAggregateElement(j)))
+ return false;
+ }
+ return i != 0;
+ }
+
+ if (Element->getUniqueInteger() != k) {
+ return false;
+ }
+ }
+
+ return true;
+ };
+
// We can only fold loads if the sources are unique.
bool CanFoldLoads = Src0 != Src1;
bool FoldedLoad = false;
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
+ SDLoc dl(Root);
+ bool IsTestN = CC == ISD::SETEQ;
+ MachineSDNode *CNode;
+ MVT ResVT = Setcc.getSimpleValueType();
if (CanFoldLoads) {
+ if (canUseKMOV()) {
+ auto Op = Src0.getOperand(0);
+ if (Op.getSimpleValueType() == MVT::i8) {
+ Op = SDValue(CurDAG->getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Op));
+ }
+ CNode = CurDAG->getMachineNode(
+ ResVT.getVectorNumElements() <= 8 ? X86::KMOVBkr : X86::KMOVWkr, dl,
+ ResVT, Op);
+ if (IsTestN)
+ CNode = CurDAG->getMachineNode(
+ ResVT.getVectorNumElements() <= 8 ? X86::KNOTBkk : X86::KNOTWkk, dl,
+ ResVT, SDValue(CNode, 0));
+ ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
+ CurDAG->RemoveDeadNode(Root);
+ return true;
+ }
FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src1, Tmp0, Tmp1, Tmp2,
Tmp3, Tmp4);
if (!FoldedLoad) {
@@ -4996,9 +5054,6 @@ bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
bool IsMasked = InMask.getNode() != nullptr;
- SDLoc dl(Root);
-
- MVT ResVT = Setcc.getSimpleValueType();
MVT MaskVT = ResVT;
if (Widen) {
// Widen the inputs using insert_subreg or copy_to_regclass.
@@ -5023,11 +5078,9 @@ bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
}
}
- bool IsTestN = CC == ISD::SETEQ;
unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
IsMasked);
- MachineSDNode *CNode;
if (FoldedLoad) {
SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
@@ -5466,10 +5519,10 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
- tryVPTESTM(Node, N0, N1))
+ tryVPTESTMOrKMOV(Node, N0, N1))
return;
if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
- tryVPTESTM(Node, N1, N0))
+ tryVPTESTMOrKMOV(Node, N1, N0))
return;
}
@@ -6393,7 +6446,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
}
case ISD::SETCC: {
- if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue()))
+ if (NVT.isVector() && tryVPTESTMOrKMOV(Node, SDValue(Node, 0), SDValue()))
return;
break;
diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll
new file mode 100644
index 0000000000000..6d72a8923c5ab
--- /dev/null
+++ b/llvm/test/CodeGen/X86/kmov.ll
@@ -0,0 +1,205 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 | FileCheck %s
+
+define dso_local void @foo_16_ne(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
+; CHECK-LABEL: foo_16_ne:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovw %ecx, %k1
+; CHECK-NEXT: vmovups (%rdx), %zmm0 {%k1} {z}
+; CHECK-NEXT: vmovups (%rsi), %zmm1 {%k1} {z}
+; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: vmovups %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = and i32 %mask, 65535
+ %.splatinsert = insertelement <16 x i32> poison, i32 %0, i64 0
+ %.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
+ %1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
+ %hir.cmp.45 = icmp ne <16 x i32> %1, zeroinitializer
+ %2 = tail call <16 x float> @llvm.masked.load.v16f32.p0(ptr %b, i32 4, <16 x i1> %hir.cmp.45, <16 x float> poison)
+ %3 = tail call <16 x float> @llvm.masked.load.v16f32.p0(ptr %a, i32 4, <16 x i1> %hir.cmp.45, <16 x float> poison)
+ %4 = fadd reassoc nsz arcp contract afn <16 x float> %2, %3
+ tail call void @llvm.masked.store.v16f32.p0(<16 x float> %4, ptr %c, i32 4, <16 x i1> %hir.cmp.45)
+ ret void
+}
+
+; Function Attrs: mustprogress nounwind uwtable
+define dso_local void @foo_16_eq(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
+; CHECK-LABEL: foo_16_eq:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovw %ecx, %k0
+; CHECK-NEXT: knotw %k0, %k1
+; CHECK-NEXT: vmovups (%rdx), %zmm0 {%k1} {z}
+; CHECK-NEXT: vmovups (%rsi), %zmm1 {%k1} {z}
+; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: vmovups %zmm0, (%rdi) {%k1}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = and i32 %mask, 65535
+ %.splatinsert = insertelement <16 x i32> poison, i32 %0, i64 0
+ %.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
+ %1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
+ %hir.cmp.45 = icmp eq <16 x i32> %1, zeroinitializer
+ %2 = tail call <16 x float> @llvm.masked.load.v16f32.p0(ptr %b, i32 4, <16 x i1> %hir.cmp.45, <16 x float> poison)
+ %3 = tail call <16 x float> @llvm.masked.load.v16f32.p0(ptr %a, i32 4, <16 x i1> %hir.cmp.45, <16 x float> poison)
+ %4 = fadd reassoc nsz arcp contract afn <16 x float> %2, %3
+ tail call void @llvm.masked.store.v16f32.p0(<16 x float> %4, ptr %c, i32 4, <16 x i1> %hir.cmp.45)
+ ret void
+}
+
+define dso_local void @foo_8_ne(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
+; CHECK-LABEL: foo_8_ne:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovb %ecx, %k1
+; CHECK-NEXT: vmovups (%rdx), %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovups (%rsi), %ymm1 {%k1} {z}
+; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vmovups %ymm0, (%rdi) {%k1}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = and i32 %mask, 65535
+ %.splatinsert = insertelement <8 x i32> poison, i32 %0, i64 0
+ %.splat = shufflevector <8 x i32> %.splatinsert, <8 x i32> poison, <8 x i32> zeroinitializer
+ %1 = and <8 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128>
+ %hir.cmp.45 = icmp ne <8 x i32> %1, zeroinitializer
+ %2 = tail call <8 x float> @llvm.masked.load.v8f32.p0(ptr %b, i32 4, <8 x i1> %hir.cmp.45, <8 x float> poison)
+ %3 = tail call <8 x float> @llvm.masked.load.v8f32.p0(ptr %a, i32 4, <8 x i1> %hir.cmp.45, <8 x float> poison)
+ %4 = fadd reassoc nsz arcp contract afn <8 x float> %2, %3
+ tail call void @llvm.masked.store.v8f32.p0(<8 x float> %4, ptr %c, i32 4, <8 x i1> %hir.cmp.45)
+ ret void
+}
+
+define dso_local void @foo_8_eq(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
+; CHECK-LABEL: foo_8_eq:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovb %ecx, %k0
+; CHECK-NEXT: knotb %k0, %k1
+; CHECK-NEXT: vmovups (%rdx), %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovups (%rsi), %ymm1 {%k1} {z}
+; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vmovups %ymm0, (%rdi) {%k1}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = and i32 %mask, 65535
+ %.splatinsert = insertelement <8 x i32> poison, i32 %0, i64 0
+ %.splat = shufflevector <8 x i32> %.splatinsert, <8 x i32> poison, <8 x i32> zeroinitializer
+ %1 = and <8 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128>
+ %hir.cmp.45 = icmp eq <8 x i32> %1, zeroinitializer
+ %2 = tail call <8 x float> @llvm.masked.load.v8f32.p0(ptr %b, i32 4, <8 x i1> %hir.cmp.45, <8 x float> poison)
+ %3 = tail call <8 x float> @llvm.masked.load.v8f32.p0(ptr %a, i32 4, <8 x i1> %hir.cmp.45, <8 x float> poison)
+ %4 = fadd reassoc nsz arcp contract afn <8 x float> %2, %3
+ tail call void @llvm.masked.store.v8f32.p0(<8 x float> %4, ptr %c, i32 4, <8 x i1> %hir.cmp.45)
+ ret void
+}
+
+define dso_local void @foo_4_ne(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
+; CHECK-LABEL: foo_4_ne:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovb %ecx, %k1
+; CHECK-NEXT: vmovups (%rdx), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovups (%rsi), %xmm1 {%k1} {z}
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
+; CHECK-NEXT: retq
+entry:
+ %0 = and i32 %mask, 65535
+ %.splatinsert = insertelement <4 x i32> poison, i32 %0, i64 0
+ %.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ %1 = and <4 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8>
+ %hir.cmp.45 = icmp ne <4 x i32> %1, zeroinitializer
+ %2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %b, i32 4, <4 x i1> %hir.cmp.45, <4 x float> poison)
+ %3 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %a, i32 4, <4 x i1> %hir.cmp.45, <4 x float> poison)
+ %4 = fadd reassoc nsz arcp contract afn <4 x float> %2, %3
+ tail call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %c, i32 4, <4 x i1> %hir.cmp.45)
+ ret void
+}
+
+define dso_local void @foo_4_eq(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
+; CHECK-LABEL: foo_4_eq:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovb %ecx, %k0
+; CHECK-NEXT: knotb %k0, %k1
+; CHECK-NEXT: vmovups (%rdx), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovups (%rsi), %xmm1 {%k1} {z}
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
+; CHECK-NEXT: retq
+entry:
+ %0 = and i32 %mask, 65535
+ %.splatinsert = insertelement <4 x i32> poison, i32 %0, i64 0
+ %.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ %1 = and <4 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8>
+ %hir.cmp.45 = icmp eq <4 x i32> %1, zeroinitializer
+ %2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %b, i32 4, <4 x i1> %hir.cmp.45, <4 x float> poison)
+ %3 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %a, i32 4, <4 x i1> %hir.cmp.45, <4 x float> poison)
+ %4 = fadd reassoc nsz arcp contract afn <4 x float> %2, %3
+ tail call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %c, i32 4, <4 x i1> %hir.cmp.45)
+ ret void
+}
+
+define dso_local void @foo_2_ne(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
+; CHECK-LABEL: foo_2_ne:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovb %ecx, %k0
+; CHECK-NEXT: kshiftlb $6, %k0, %k0
+; CHECK-NEXT: kshiftrb $6, %k0, %k1
+; CHECK-NEXT: vmovups (%rdx), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovups (%rsi), %xmm1 {%k1} {z}
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <2 x i32> poison, i32 %mask, i64 0
+ %.splat = shufflevector <2 x i32> %.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+ %0 = and <2 x i32> %.splat, <i32 1, i32 2>
+ %hir.cmp.44 = icmp ne <2 x i32> %0, zeroinitializer
+ %1 = tail call <2 x float> @llvm.masked.load.v2f32.p0(ptr %b, i32 4, <2 x i1> %hir.cmp.44, <2 x float> poison)
+ %2 = tail call <2 x float> @llvm.masked.load.v2f32.p0(ptr %a, i32 4, <2 x i1> %hir.cmp.44, <2 x float> poison)
+ %3 = fadd reassoc nsz arcp contract afn <2 x float> %1, %2
+ tail call void @llvm.masked.store.v2f32.p0(<2 x float> %3, ptr %c, i32 4, <2 x i1> %hir.cmp.44)
+ ret void
+}
+
+define dso_local void @foo_2_eq(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
+; CHECK-LABEL: foo_2_eq:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovb %ecx, %k0
+; CHECK-NEXT: knotb %k0, %k0
+; CHECK-NEXT: kshiftlb $6, %k0, %k0
+; CHECK-NEXT: kshiftrb $6, %k0, %k1
+; CHECK-NEXT: vmovups (%rdx), %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovups (%rsi), %xmm1 {%k1} {z}
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <2 x i32> poison, i32 %mask, i64 0
+ %.splat = shufflevector <2 x i32> %.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
+ %0 = and <2 x i32> %.splat, <i32 1, i32 2>
+ %hir.cmp.44 = icmp eq <2 x i32> %0, zeroinitializer
+ %1 = tail call <2 x float> @llvm.masked.load.v2f32.p0(ptr %b, i32 4, <2 x i1> %hir.cmp.44, <2 x float> poison)
+ %2 = tail call <2 x float> @llvm.masked.load.v2f32.p0(ptr %a, i32 4, <2 x i1> %hir.cmp.44, <2 x float> poison)
+ %3 = fadd reassoc nsz arcp contract afn <2 x float> %1, %2
+ tail call void @llvm.masked.store.v2f32.p0(<2 x float> %3, ptr %c, i32 4, <2 x i1> %hir.cmp.44)
+ ret void
+}
+
+declare <2 x float> @llvm.masked.load.v2f32.p0(ptr nocapture, i32 immarg, <2 x i1>, <2 x float>) #1
+
+declare void @llvm.masked.store.v2f32.p0(<2 x float>, ptr nocapture, i32 immarg, <2 x i1>) #2
+
+declare <4 x float> @llvm.masked.load.v4f32.p0(ptr nocapture, i32 immarg, <4 x i1>, <4 x float>) #1
+
+declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr nocapture, i32 immarg, <4 x i1>) #2
+
+declare <8 x float> @llvm.masked.load.v8f32.p0(ptr nocapture, i32 immarg, <8 x i1>, <8 x float>)
+
+declare void @llvm.masked.store.v8f32.p0(<8 x float>, ptr nocapture, i32 immarg, <8 x i1>)
+
+declare <16 x float> @llvm.masked.load.v16f32.p0(ptr nocapture, i32 immarg, <16 x i1>, <16 x float>)
+
+declare void @llvm.masked.store.v16f32.p0(<16 x float>, ptr nocapture, i32 immarg, <16 x i1>)
diff --git a/llvm/test/CodeGen/X86/pr78897.ll b/llvm/test/CodeGen/X86/pr78897.ll
index 56e4ec2bc8ecb..38a1800df956b 100644
--- a/llvm/test/CodeGen/X86/pr78897.ll
+++ b/llvm/test/CodeGen/X86/pr78897.ll
@@ -256,8 +256,8 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
;
; X64-AVX512-LABEL: produceShuffleVectorForByte:
; X64-AVX512: # %bb.0: # %entry
-; X64-AVX512-NEXT: vpbroadcastb %edi, %xmm0
-; X64-AVX512-NEXT: vptestnmb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k1
+; X64-AVX512-NEXT: kmovw %edi, %k0
+; X64-AVX512-NEXT: knotw %k0, %k1
; X64-AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
; X64-AVX512-NEXT: vmovq %xmm0, %rax
; X64-AVX512-NEXT: movabsq $1229782938247303440, %rcx # imm = 0x1111111111111110
From 3f39f655f46dc7f9f3a5d3218ecffcaeb782fccb Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Fri, 20 Dec 2024 12:29:36 +0530
Subject: [PATCH 02/17] Review Changes
---
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 23 +++++++++++------------
llvm/test/CodeGen/X86/pr78897.ll | 5 +++--
2 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 8c199a30dfbce..054ff6743b9a5 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -4976,7 +4976,8 @@ bool X86DAGToDAGISel::tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc,
};
auto canUseKMOV = [&]() {
- if (Src0.getOpcode() != X86ISD::VBROADCAST)
+ if (Src0.getOpcode() != X86ISD::VBROADCAST &&
+ Src0.getOpcode() != X86ISD::VBROADCAST_LOAD)
return false;
if (Src1.getOpcode() != ISD::LOAD ||
@@ -4994,20 +4995,18 @@ bool X86DAGToDAGISel::tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc,
if (!ConstVecType)
return false;
- for (unsigned i = 0, e = ConstVecType->getNumElements(), k = 1; i != e;
- ++i, k *= 2) {
- const auto *Element = ConstVec->getAggregateElement(i);
+ for (unsigned I = 0, E = ConstVecType->getNumElements(); I != E; ++I) {
+ const auto *Element = ConstVec->getAggregateElement(I);
if (llvm::isa<llvm::UndefValue>(Element)) {
- for (unsigned j = i + 1; j != e; ++j) {
- if (!llvm::isa<llvm::UndefValue>(ConstVec->getAggregateElement(j)))
+ for (unsigned J = I + 1; J != E; ++J) {
+ if (!llvm::isa<llvm::UndefValue>(ConstVec->getAggregateElement(J)))
return false;
}
- return i != 0;
+ return I != 0;
}
- if (Element->getUniqueInteger() != k) {
+ if (Element->getUniqueInteger() != 1 << I)
return false;
- }
}
return true;
@@ -5024,10 +5023,10 @@ bool X86DAGToDAGISel::tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc,
MVT ResVT = Setcc.getSimpleValueType();
if (CanFoldLoads) {
if (canUseKMOV()) {
- auto Op = Src0.getOperand(0);
- if (Op.getSimpleValueType() == MVT::i8) {
+ auto Op = Src0.getOpcode() == X86ISD::VBROADCAST ? Src0.getOperand(0)
+ : Src0.getOperand(1);
+ if (Op.getSimpleValueType() == MVT::i8)
Op = SDValue(CurDAG->getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Op));
- }
CNode = CurDAG->getMachineNode(
ResVT.getVectorNumElements() <= 8 ? X86::KMOVBkr : X86::KMOVWkr, dl,
ResVT, Op);
diff --git a/llvm/test/CodeGen/X86/pr78897.ll b/llvm/test/CodeGen/X86/pr78897.ll
index 38a1800df956b..0c4c03de5901e 100644
--- a/llvm/test/CodeGen/X86/pr78897.ll
+++ b/llvm/test/CodeGen/X86/pr78897.ll
@@ -223,8 +223,9 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
; X86-AVX512-NEXT: pushl %ebx
; X86-AVX512-NEXT: pushl %edi
; X86-AVX512-NEXT: pushl %esi
-; X86-AVX512-NEXT: vpbroadcastb {{[0-9]+}}(%esp), %xmm0
-; X86-AVX512-NEXT: vptestnmb {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %k1
+; X86-AVX512-NEXT: leal {{[0-9]+}}(%esp)
+; X86-AVX512-NEXT: kmovw %eax, %k0
+; X86-AVX512-NEXT: knotw %k0, %k1
; X86-AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
; X86-AVX512-NEXT: vpextrd $1, %xmm0, %eax
; X86-AVX512-NEXT: vmovd %xmm0, %edx
From 57c4aa0ad3fa524f15fb03acee7e92cab250f979 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Mon, 23 Dec 2024 11:06:02 +0530
Subject: [PATCH 03/17] Update tests
- Remove attributes
- Remove fast math flags
- Simplify tests by removing mask/loads
---
llvm/test/CodeGen/X86/kmov.ll | 179 ++++++++++------------------------
1 file changed, 52 insertions(+), 127 deletions(-)
diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll
index 6d72a8923c5ab..f17a559012e67 100644
--- a/llvm/test/CodeGen/X86/kmov.ll
+++ b/llvm/test/CodeGen/X86/kmov.ll
@@ -1,108 +1,73 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 | FileCheck %s
-define dso_local void @foo_16_ne(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
-; CHECK-LABEL: foo_16_ne:
+define <16 x i1> @pr120593_16_ne(i32 %mask) {
+; CHECK-LABEL: pr120593_16_ne:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovw %ecx, %k1
-; CHECK-NEXT: vmovups (%rdx), %zmm0 {%k1} {z}
-; CHECK-NEXT: vmovups (%rsi), %zmm1 {%k1} {z}
-; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; CHECK-NEXT: vmovups %zmm0, (%rdi) {%k1}
-; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: vpmovm2b %k0, %xmm0
; CHECK-NEXT: retq
entry:
%0 = and i32 %mask, 65535
%.splatinsert = insertelement <16 x i32> poison, i32 %0, i64 0
%.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
%1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
- %hir.cmp.45 = icmp ne <16 x i32> %1, zeroinitializer
- %2 = tail call <16 x float> @llvm.masked.load.v16f32.p0(ptr %b, i32 4, <16 x i1> %hir.cmp.45, <16 x float> poison)
- %3 = tail call <16 x float> @llvm.masked.load.v16f32.p0(ptr %a, i32 4, <16 x i1> %hir.cmp.45, <16 x float> poison)
- %4 = fadd reassoc nsz arcp contract afn <16 x float> %2, %3
- tail call void @llvm.masked.store.v16f32.p0(<16 x float> %4, ptr %c, i32 4, <16 x i1> %hir.cmp.45)
- ret void
+ %cmp.45 = icmp ne <16 x i32> %1, zeroinitializer
+ ret <16 x i1> %cmp.45
}
-; Function Attrs: mustprogress nounwind uwtable
-define dso_local void @foo_16_eq(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
-; CHECK-LABEL: foo_16_eq:
+define <16 x i1> @pr120593_16_eq(i32 %mask) {
+; CHECK-LABEL: pr120593_16_eq:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovw %ecx, %k0
-; CHECK-NEXT: knotw %k0, %k1
-; CHECK-NEXT: vmovups (%rdx), %zmm0 {%k1} {z}
-; CHECK-NEXT: vmovups (%rsi), %zmm1 {%k1} {z}
-; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; CHECK-NEXT: vmovups %zmm0, (%rdi) {%k1}
-; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: knotw %k0, %k0
+; CHECK-NEXT: vpmovm2b %k0, %xmm0
; CHECK-NEXT: retq
entry:
%0 = and i32 %mask, 65535
%.splatinsert = insertelement <16 x i32> poison, i32 %0, i64 0
%.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
%1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
- %hir.cmp.45 = icmp eq <16 x i32> %1, zeroinitializer
- %2 = tail call <16 x float> @llvm.masked.load.v16f32.p0(ptr %b, i32 4, <16 x i1> %hir.cmp.45, <16 x float> poison)
- %3 = tail call <16 x float> @llvm.masked.load.v16f32.p0(ptr %a, i32 4, <16 x i1> %hir.cmp.45, <16 x float> poison)
- %4 = fadd reassoc nsz arcp contract afn <16 x float> %2, %3
- tail call void @llvm.masked.store.v16f32.p0(<16 x float> %4, ptr %c, i32 4, <16 x i1> %hir.cmp.45)
- ret void
+ %cmp.45 = icmp eq <16 x i32> %1, zeroinitializer
+ ret <16 x i1> %cmp.45
}
-define dso_local void @foo_8_ne(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
-; CHECK-LABEL: foo_8_ne:
+define <8 x i1> @pr120593_8_ne(i32 %mask) {
+; CHECK-LABEL: pr120593_8_ne:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %ecx, %k1
-; CHECK-NEXT: vmovups (%rdx), %ymm0 {%k1} {z}
-; CHECK-NEXT: vmovups (%rsi), %ymm1 {%k1} {z}
-; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vmovups %ymm0, (%rdi) {%k1}
-; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: kmovb %edi, %k0
+; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: retq
entry:
%0 = and i32 %mask, 65535
%.splatinsert = insertelement <8 x i32> poison, i32 %0, i64 0
%.splat = shufflevector <8 x i32> %.splatinsert, <8 x i32> poison, <8 x i32> zeroinitializer
%1 = and <8 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128>
- %hir.cmp.45 = icmp ne <8 x i32> %1, zeroinitializer
- %2 = tail call <8 x float> @llvm.masked.load.v8f32.p0(ptr %b, i32 4, <8 x i1> %hir.cmp.45, <8 x float> poison)
- %3 = tail call <8 x float> @llvm.masked.load.v8f32.p0(ptr %a, i32 4, <8 x i1> %hir.cmp.45, <8 x float> poison)
- %4 = fadd reassoc nsz arcp contract afn <8 x float> %2, %3
- tail call void @llvm.masked.store.v8f32.p0(<8 x float> %4, ptr %c, i32 4, <8 x i1> %hir.cmp.45)
- ret void
+ %cmp.45 = icmp ne <8 x i32> %1, zeroinitializer
+ ret <8 x i1> %cmp.45
}
-define dso_local void @foo_8_eq(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
-; CHECK-LABEL: foo_8_eq:
+define <8 x i1> @pr120593_8_eq(i32 %mask) {
+; CHECK-LABEL: pr120593_8_eq:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %ecx, %k0
-; CHECK-NEXT: knotb %k0, %k1
-; CHECK-NEXT: vmovups (%rdx), %ymm0 {%k1} {z}
-; CHECK-NEXT: vmovups (%rsi), %ymm1 {%k1} {z}
-; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vmovups %ymm0, (%rdi) {%k1}
-; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: kmovb %edi, %k0
+; CHECK-NEXT: knotb %k0, %k0
+; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: retq
entry:
%0 = and i32 %mask, 65535
%.splatinsert = insertelement <8 x i32> poison, i32 %0, i64 0
%.splat = shufflevector <8 x i32> %.splatinsert, <8 x i32> poison, <8 x i32> zeroinitializer
%1 = and <8 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128>
- %hir.cmp.45 = icmp eq <8 x i32> %1, zeroinitializer
- %2 = tail call <8 x float> @llvm.masked.load.v8f32.p0(ptr %b, i32 4, <8 x i1> %hir.cmp.45, <8 x float> poison)
- %3 = tail call <8 x float> @llvm.masked.load.v8f32.p0(ptr %a, i32 4, <8 x i1> %hir.cmp.45, <8 x float> poison)
- %4 = fadd reassoc nsz arcp contract afn <8 x float> %2, %3
- tail call void @llvm.masked.store.v8f32.p0(<8 x float> %4, ptr %c, i32 4, <8 x i1> %hir.cmp.45)
- ret void
+ %cmp.45 = icmp eq <8 x i32> %1, zeroinitializer
+ ret <8 x i1> %cmp.45
}
-define dso_local void @foo_4_ne(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
-; CHECK-LABEL: foo_4_ne:
+define void @pr120593_4_ne(ptr %c, ptr %b, i32 %mask) {
+; CHECK-LABEL: pr120593_4_ne:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %ecx, %k1
-; CHECK-NEXT: vmovups (%rdx), %xmm0 {%k1} {z}
-; CHECK-NEXT: vmovups (%rsi), %xmm1 {%k1} {z}
-; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: kmovb %edx, %k1
+; CHECK-NEXT: vmovups (%rsi), %xmm0 {%k1} {z}
; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
entry:
@@ -110,22 +75,18 @@ entry:
%.splatinsert = insertelement <4 x i32> poison, i32 %0, i64 0
%.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
%1 = and <4 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8>
- %hir.cmp.45 = icmp ne <4 x i32> %1, zeroinitializer
- %2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %b, i32 4, <4 x i1> %hir.cmp.45, <4 x float> poison)
- %3 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %a, i32 4, <4 x i1> %hir.cmp.45, <4 x float> poison)
- %4 = fadd reassoc nsz arcp contract afn <4 x float> %2, %3
- tail call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %c, i32 4, <4 x i1> %hir.cmp.45)
+ %cmp.45 = icmp ne <4 x i32> %1, zeroinitializer
+ %2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %b, i32 4, <4 x i1> %cmp.45, <4 x float> poison)
+ tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %c, i32 4, <4 x i1> %cmp.45)
ret void
}
-define dso_local void @foo_4_eq(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
-; CHECK-LABEL: foo_4_eq:
+define void @pr120593_4_eq(ptr %c, ptr %b, i32 %mask) {
+; CHECK-LABEL: pr120593_4_eq:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %ecx, %k0
+; CHECK-NEXT: kmovb %edx, %k0
; CHECK-NEXT: knotb %k0, %k1
-; CHECK-NEXT: vmovups (%rdx), %xmm0 {%k1} {z}
-; CHECK-NEXT: vmovups (%rsi), %xmm1 {%k1} {z}
-; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vmovups (%rsi), %xmm0 {%k1} {z}
; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
entry:
@@ -133,73 +94,37 @@ entry:
%.splatinsert = insertelement <4 x i32> poison, i32 %0, i64 0
%.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
%1 = and <4 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8>
- %hir.cmp.45 = icmp eq <4 x i32> %1, zeroinitializer
- %2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %b, i32 4, <4 x i1> %hir.cmp.45, <4 x float> poison)
- %3 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %a, i32 4, <4 x i1> %hir.cmp.45, <4 x float> poison)
- %4 = fadd reassoc nsz arcp contract afn <4 x float> %2, %3
- tail call void @llvm.masked.store.v4f32.p0(<4 x float> %4, ptr %c, i32 4, <4 x i1> %hir.cmp.45)
+ %cmp.45 = icmp eq <4 x i32> %1, zeroinitializer
+ %2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %b, i32 4, <4 x i1> %cmp.45, <4 x float> poison)
+ tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %c, i32 4, <4 x i1> %cmp.45)
ret void
}
-define dso_local void @foo_2_ne(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
-; CHECK-LABEL: foo_2_ne:
+define <2 x i1> @pr120593_2_ne(i32 %mask) {
+; CHECK-LABEL: pr120593_2_ne:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %ecx, %k0
-; CHECK-NEXT: kshiftlb $6, %k0, %k0
-; CHECK-NEXT: kshiftrb $6, %k0, %k1
-; CHECK-NEXT: vmovups (%rdx), %xmm0 {%k1} {z}
-; CHECK-NEXT: vmovups (%rsi), %xmm1 {%k1} {z}
-; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
+; CHECK-NEXT: kmovb %edi, %k0
+; CHECK-NEXT: vpmovm2q %k0, %xmm0
; CHECK-NEXT: retq
entry:
%.splatinsert = insertelement <2 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <2 x i32> %.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
%0 = and <2 x i32> %.splat, <i32 1, i32 2>
- %hir.cmp.44 = icmp ne <2 x i32> %0, zeroinitializer
- %1 = tail call <2 x float> @llvm.masked.load.v2f32.p0(ptr %b, i32 4, <2 x i1> %hir.cmp.44, <2 x float> poison)
- %2 = tail call <2 x float> @llvm.masked.load.v2f32.p0(ptr %a, i32 4, <2 x i1> %hir.cmp.44, <2 x float> poison)
- %3 = fadd reassoc nsz arcp contract afn <2 x float> %1, %2
- tail call void @llvm.masked.store.v2f32.p0(<2 x float> %3, ptr %c, i32 4, <2 x i1> %hir.cmp.44)
- ret void
+ %cmp.44 = icmp ne <2 x i32> %0, zeroinitializer
+ ret <2 x i1> %cmp.44
}
-define dso_local void @foo_2_eq(ptr nocapture noundef writeonly %c, ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b, i32 noundef %mask) {
-; CHECK-LABEL: foo_2_eq:
+define <2 x i1> @pr120593_2_eq(i32 %mask) {
+; CHECK-LABEL: pr120593_2_eq:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %ecx, %k0
+; CHECK-NEXT: kmovb %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
-; CHECK-NEXT: kshiftlb $6, %k0, %k0
-; CHECK-NEXT: kshiftrb $6, %k0, %k1
-; CHECK-NEXT: vmovups (%rdx), %xmm0 {%k1} {z}
-; CHECK-NEXT: vmovups (%rsi), %xmm1 {%k1} {z}
-; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
+; CHECK-NEXT: vpmovm2q %k0, %xmm0
; CHECK-NEXT: retq
entry:
%.splatinsert = insertelement <2 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <2 x i32> %.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
%0 = and <2 x i32> %.splat, <i32 1, i32 2>
- %hir.cmp.44 = icmp eq <2 x i32> %0, zeroinitializer
- %1 = tail call <2 x float> @llvm.masked.load.v2f32.p0(ptr %b, i32 4, <2 x i1> %hir.cmp.44, <2 x float> poison)
- %2 = tail call <2 x float> @llvm.masked.load.v2f32.p0(ptr %a, i32 4, <2 x i1> %hir.cmp.44, <2 x float> poison)
- %3 = fadd reassoc nsz arcp contract afn <2 x float> %1, %2
- tail call void @llvm.masked.store.v2f32.p0(<2 x float> %3, ptr %c, i32 4, <2 x i1> %hir.cmp.44)
- ret void
+ %cmp.44 = icmp eq <2 x i32> %0, zeroinitializer
+ ret <2 x i1> %cmp.44
}
-
-declare <2 x float> @llvm.masked.load.v2f32.p0(ptr nocapture, i32 immarg, <2 x i1>, <2 x float>) #1
-
-declare void @llvm.masked.store.v2f32.p0(<2 x float>, ptr nocapture, i32 immarg, <2 x i1>) #2
-
-declare <4 x float> @llvm.masked.load.v4f32.p0(ptr nocapture, i32 immarg, <4 x i1>, <4 x float>) #1
-
-declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr nocapture, i32 immarg, <4 x i1>) #2
-
-declare <8 x float> @llvm.masked.load.v8f32.p0(ptr nocapture, i32 immarg, <8 x i1>, <8 x float>)
-
-declare void @llvm.masked.store.v8f32.p0(<8 x float>, ptr nocapture, i32 immarg, <8 x i1>)
-
-declare <16 x float> @llvm.masked.load.v16f32.p0(ptr nocapture, i32 immarg, <16 x i1>, <16 x float>)
-
-declare void @llvm.masked.store.v16f32.p0(<16 x float>, ptr nocapture, i32 immarg, <16 x i1>)
From 85b9945ef9cde51353f6e96bd68270b62c542d06 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Tue, 24 Dec 2024 15:24:33 +0530
Subject: [PATCH 04/17] Combine to KMOV
Emit the KMOV from a DAG combine instead of doing it in ISel.
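For reference, a minimal sketch of how the new combine is wired in (condensed from the `combineSetCC` hunk below; names as in the diff):
```cpp
// In combineSetCC, once the RHS is known to be an all-zeros vector (IsVZero1),
// try to replace the broadcast + vptest pattern with a direct mask-register move.
if (IsVZero1)
  if (SDValue V =
          combineAVX512SetCCToKMOV(VT, Op0, TmpCC, DL, DAG, Subtarget))
    return V;
```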
---
llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 78 ++++----------------
llvm/lib/Target/X86/X86ISelLowering.cpp | 95 +++++++++++++++++++++++++
llvm/test/CodeGen/X86/kmov.ll | 22 +++---
llvm/test/CodeGen/X86/pr78897.ll | 6 +-
4 files changed, 122 insertions(+), 79 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index daf3e01506374..9b340a778b36a 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -592,7 +592,7 @@ namespace {
bool matchVPTERNLOG(SDNode *Root, SDNode *ParentA, SDNode *ParentB,
SDNode *ParentC, SDValue A, SDValue B, SDValue C,
uint8_t Imm);
- bool tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc, SDValue Mask);
+ bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
bool tryMatchBitSelect(SDNode *N);
MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
@@ -4898,10 +4898,10 @@ VPTESTM_CASE(v32i16, WZ##SUFFIX)
#undef VPTESTM_CASE
}
-// Try to create VPTESTM or KMOV instruction. If InMask is not null, it will be
-// used to form a masked operation.
-bool X86DAGToDAGISel::tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc,
- SDValue InMask) {
+// Try to create VPTESTM instruction. If InMask is not null, it will be used
+// to form a masked operation.
+bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
+ SDValue InMask) {
assert(Subtarget->hasAVX512() && "Expected AVX512!");
assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
"Unexpected VT!");
@@ -4976,69 +4976,12 @@ bool X86DAGToDAGISel::tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc,
return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
};
- auto canUseKMOV = [&]() {
- if (Src0.getOpcode() != X86ISD::VBROADCAST &&
- Src0.getOpcode() != X86ISD::VBROADCAST_LOAD)
- return false;
-
- if (Src1.getOpcode() != ISD::LOAD ||
- Src1.getOperand(1).getOpcode() != X86ISD::Wrapper ||
- Src1.getOperand(1).getOperand(0).getOpcode() != ISD::TargetConstantPool)
- return false;
-
- const auto *ConstPool =
- dyn_cast<ConstantPoolSDNode>(Src1.getOperand(1).getOperand(0));
- if (!ConstPool)
- return false;
-
- const auto *ConstVec = ConstPool->getConstVal();
- const auto *ConstVecType = dyn_cast<FixedVectorType>(ConstVec->getType());
- if (!ConstVecType)
- return false;
-
- for (unsigned I = 0, E = ConstVecType->getNumElements(); I != E; ++I) {
- const auto *Element = ConstVec->getAggregateElement(I);
- if (llvm::isa<llvm::UndefValue>(Element)) {
- for (unsigned J = I + 1; J != E; ++J) {
- if (!llvm::isa<llvm::UndefValue>(ConstVec->getAggregateElement(J)))
- return false;
- }
- return I != 0;
- }
-
- if (Element->getUniqueInteger() != 1 << I)
- return false;
- }
-
- return true;
- };
-
// We can only fold loads if the sources are unique.
bool CanFoldLoads = Src0 != Src1;
bool FoldedLoad = false;
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
- SDLoc dl(Root);
- bool IsTestN = CC == ISD::SETEQ;
- MachineSDNode *CNode;
- MVT ResVT = Setcc.getSimpleValueType();
if (CanFoldLoads) {
- if (canUseKMOV()) {
- auto Op = Src0.getOpcode() == X86ISD::VBROADCAST ? Src0.getOperand(0)
- : Src0.getOperand(1);
- if (Op.getSimpleValueType() == MVT::i8)
- Op = SDValue(CurDAG->getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Op));
- CNode = CurDAG->getMachineNode(
- ResVT.getVectorNumElements() <= 8 ? X86::KMOVBkr : X86::KMOVWkr, dl,
- ResVT, Op);
- if (IsTestN)
- CNode = CurDAG->getMachineNode(
- ResVT.getVectorNumElements() <= 8 ? X86::KNOTBkk : X86::KNOTWkk, dl,
- ResVT, SDValue(CNode, 0));
- ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
- CurDAG->RemoveDeadNode(Root);
- return true;
- }
FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src1, Tmp0, Tmp1, Tmp2,
Tmp3, Tmp4);
if (!FoldedLoad) {
@@ -5054,6 +4997,9 @@ bool X86DAGToDAGISel::tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc,
bool IsMasked = InMask.getNode() != nullptr;
+ SDLoc dl(Root);
+
+ MVT ResVT = Setcc.getSimpleValueType();
MVT MaskVT = ResVT;
if (Widen) {
// Widen the inputs using insert_subreg or copy_to_regclass.
@@ -5078,9 +5024,11 @@ bool X86DAGToDAGISel::tryVPTESTMOrKMOV(SDNode *Root, SDValue Setcc,
}
}
+ bool IsTestN = CC == ISD::SETEQ;
unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
IsMasked);
+ MachineSDNode *CNode;
if (FoldedLoad) {
SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
@@ -5519,10 +5467,10 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
- tryVPTESTMOrKMOV(Node, N0, N1))
+ tryVPTESTM(Node, N0, N1))
return;
if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
- tryVPTESTMOrKMOV(Node, N1, N0))
+ tryVPTESTM(Node, N1, N0))
return;
}
@@ -6446,7 +6394,7 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
}
case ISD::SETCC: {
- if (NVT.isVector() && tryVPTESTMOrKMOV(Node, SDValue(Node, 0), SDValue()))
+ if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue()))
return;
break;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2528ca553d3e9..e61bb46a683ec 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55288,6 +55288,95 @@ static SDValue truncateAVX512SetCCNoBWI(EVT VT, EVT OpVT, SDValue LHS,
return SDValue();
}
+static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
+ const SDLoc &DL, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ if (CC != ISD::SETNE && CC != ISD::SETEQ)
+ return SDValue();
+
+ if (!Subtarget.hasAVX512())
+ return SDValue();
+
+ if (Op0.getOpcode() != ISD::AND)
+ return SDValue();
+
+ SDValue Broadcast = Op0.getOperand(0);
+ if (Broadcast.getOpcode() != X86ISD::VBROADCAST &&
+ Broadcast.getOpcode() != X86ISD::VBROADCAST_LOAD)
+ return SDValue();
+
+ SDValue Load = Op0.getOperand(1);
+ if (Load.getOpcode() != ISD::LOAD)
+ return SDValue();
+
+ SDValue Wrapper = Load.getOperand(1);
+ if (Wrapper.getOpcode() != X86ISD::Wrapper)
+ return SDValue();
+
+ const auto *TargetConstPool =
+ dyn_cast<ConstantPoolSDNode>(Wrapper.getOperand(0));
+ if (!TargetConstPool)
+ return SDValue();
+
+ const auto *ConstVec = TargetConstPool->getConstVal();
+ const auto *ConstVecType = dyn_cast<FixedVectorType>(ConstVec->getType());
+ if (!ConstVecType)
+ return SDValue();
+
+ const auto *First = ConstVec->getAggregateElement(0U);
+ if (llvm::isa<UndefValue>(First) || !First->getUniqueInteger().isPowerOf2())
+ return SDValue();
+
+ unsigned N = First->getUniqueInteger().logBase2();
+
+ for (unsigned I = 1, E = ConstVecType->getNumElements(); I < E; ++I) {
+ const auto *Element = ConstVec->getAggregateElement(I);
+ if (llvm::isa<llvm::UndefValue>(Element)) {
+ for (unsigned J = I + 1; J != E; ++J) {
+ if (!llvm::isa<llvm::UndefValue>(ConstVec->getAggregateElement(J)))
+ return SDValue();
+ }
+ break;
+ }
+
+ if (Element->getUniqueInteger() != 1 << (I + N))
+ return SDValue();
+ }
+
+ SDValue BroadcastOp = Broadcast.getOpcode() == X86ISD::VBROADCAST
+ ? Broadcast.getOperand(0)
+ : Broadcast.getOperand(1);
+ MVT BroadcastOpVT = BroadcastOp.getSimpleValueType();
+ unsigned Len = VT.getVectorNumElements();
+ SDValue Masked = BroadcastOp;
+ if (N != 0) {
+ unsigned Mask = (1ULL << Len) - 1;
+ SDValue ShiftedValue = DAG.getNode(ISD::SRL, DL, BroadcastOpVT, BroadcastOp,
+ DAG.getConstant(N, DL, BroadcastOpVT));
+ Masked = DAG.getNode(ISD::AND, DL, BroadcastOpVT, ShiftedValue,
+ DAG.getConstant(Mask, DL, BroadcastOpVT));
+ }
+ SDValue Trunc = DAG.getNode(BroadcastOpVT.bitsGT(MVT::i16) ? ISD::TRUNCATE
+ : ISD::ANY_EXTEND,
+ DL, MVT::i16, Masked);
+ SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, MVT::v16i1, Trunc);
+ MVT PtrTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+
+ if (CC == ISD::SETEQ)
+ Bitcast = DAG.getNode(
+ ISD::XOR, DL, MVT::v16i1, Bitcast,
+ DAG.getSplatBuildVector(
+ MVT::v16i1, DL,
+ DAG.getConstant(APInt::getAllOnes(PtrTy.getSizeInBits()), DL,
+ PtrTy)));
+
+ if (VT != MVT::v16i1)
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Bitcast,
+ DAG.getConstant(0, DL, PtrTy));
+
+ return Bitcast;
+}
+
static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
@@ -55420,6 +55509,12 @@ static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
"Unexpected condition code!");
return Op0.getOperand(0);
}
+
+ if (IsVZero1) {
+ if (SDValue V =
+ combineAVX512SetCCToKMOV(VT, Op0, TmpCC, DL, DAG, Subtarget))
+ return V;
+ }
}
// Try and make unsigned vector comparison signed. On pre AVX512 targets there
diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll
index f17a559012e67..ba39fc4d1af76 100644
--- a/llvm/test/CodeGen/X86/kmov.ll
+++ b/llvm/test/CodeGen/X86/kmov.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=skylake-avx512 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s
define <16 x i1> @pr120593_16_ne(i32 %mask) {
; CHECK-LABEL: pr120593_16_ne:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: vpmovm2b %k0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -19,7 +19,7 @@ entry:
define <16 x i1> @pr120593_16_eq(i32 %mask) {
; CHECK-LABEL: pr120593_16_eq:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %xmm0
; CHECK-NEXT: retq
@@ -35,7 +35,7 @@ entry:
define <8 x i1> @pr120593_8_ne(i32 %mask) {
; CHECK-LABEL: pr120593_8_ne:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %edi, %k0
+; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -50,7 +50,7 @@ entry:
define <8 x i1> @pr120593_8_eq(i32 %mask) {
; CHECK-LABEL: pr120593_8_eq:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %edi, %k0
+; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: retq
@@ -66,7 +66,7 @@ entry:
define void @pr120593_4_ne(ptr %c, ptr %b, i32 %mask) {
; CHECK-LABEL: pr120593_4_ne:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %edx, %k1
+; CHECK-NEXT: kmovd %edx, %k1
; CHECK-NEXT: vmovups (%rsi), %xmm0 {%k1} {z}
; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
@@ -84,8 +84,8 @@ entry:
define void @pr120593_4_eq(ptr %c, ptr %b, i32 %mask) {
; CHECK-LABEL: pr120593_4_eq:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %edx, %k0
-; CHECK-NEXT: knotb %k0, %k1
+; CHECK-NEXT: kmovd %edx, %k0
+; CHECK-NEXT: knotw %k0, %k1
; CHECK-NEXT: vmovups (%rsi), %xmm0 {%k1} {z}
; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
@@ -103,7 +103,7 @@ entry:
define <2 x i1> @pr120593_2_ne(i32 %mask) {
; CHECK-LABEL: pr120593_2_ne:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %edi, %k0
+; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: vpmovm2q %k0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -117,8 +117,8 @@ entry:
define <2 x i1> @pr120593_2_eq(i32 %mask) {
; CHECK-LABEL: pr120593_2_eq:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovb %edi, %k0
-; CHECK-NEXT: knotb %k0, %k0
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: vpmovm2q %k0, %xmm0
; CHECK-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/pr78897.ll b/llvm/test/CodeGen/X86/pr78897.ll
index 0c4c03de5901e..c3c597f4d79de 100644
--- a/llvm/test/CodeGen/X86/pr78897.ll
+++ b/llvm/test/CodeGen/X86/pr78897.ll
@@ -223,8 +223,8 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
; X86-AVX512-NEXT: pushl %ebx
; X86-AVX512-NEXT: pushl %edi
; X86-AVX512-NEXT: pushl %esi
-; X86-AVX512-NEXT: leal {{[0-9]+}}(%esp)
-; X86-AVX512-NEXT: kmovw %eax, %k0
+; X86-AVX512-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT: kmovd %eax, %k0
; X86-AVX512-NEXT: knotw %k0, %k1
; X86-AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
; X86-AVX512-NEXT: vpextrd $1, %xmm0, %eax
@@ -257,7 +257,7 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
;
; X64-AVX512-LABEL: produceShuffleVectorForByte:
; X64-AVX512: # %bb.0: # %entry
-; X64-AVX512-NEXT: kmovw %edi, %k0
+; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: knotw %k0, %k1
; X64-AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
; X64-AVX512-NEXT: vmovq %xmm0, %rax
From 1ae411419f3cf017f9d876d5d627e7f89e103953 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Wed, 22 Jan 2025 17:00:12 +0530
Subject: [PATCH 05/17] Use getTargetConstantBitsFromNode
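`getTargetConstantBitsFromNode` extracts the per-element constant bits and an undef mask straight from the DAG node, replacing the manual walk over the `ConstantPoolSDNode`. The call shape used in this patch (see the hunk below) is:
```cpp
// Collect the splatted constant-pool elements as APInts; bail out if the
// first element is undef or not a power of 2.
APInt UndefElts;
SmallVector<APInt, 32> EltBits;
if (!getTargetConstantBitsFromNode(Load, LoadVT.getScalarSizeInBits(),
                                   UndefElts, EltBits,
                                   /*AllowWholeUndefs*/ true,
                                   /*AllowPartialUndefs*/ false) ||
    UndefElts[0] || !EltBits[0].isPowerOf2())
  return SDValue();
```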
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 43 +++++++++----------------
1 file changed, 15 insertions(+), 28 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a7921246e0dbc..299d29bf88798 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55311,40 +55311,28 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
return SDValue();
SDValue Load = Op0.getOperand(1);
- if (Load.getOpcode() != ISD::LOAD)
- return SDValue();
-
- SDValue Wrapper = Load.getOperand(1);
- if (Wrapper.getOpcode() != X86ISD::Wrapper)
- return SDValue();
-
- const auto *TargetConstPool =
- dyn_cast<ConstantPoolSDNode>(Wrapper.getOperand(0));
- if (!TargetConstPool)
- return SDValue();
-
- const auto *ConstVec = TargetConstPool->getConstVal();
- const auto *ConstVecType = dyn_cast<FixedVectorType>(ConstVec->getType());
- if (!ConstVecType)
- return SDValue();
+ EVT LoadVT = Load.getSimpleValueType();
- const auto *First = ConstVec->getAggregateElement(0U);
- if (llvm::isa<UndefValue>(First) || !First->getUniqueInteger().isPowerOf2())
+ APInt UndefElts;
+ SmallVector<APInt, 32> EltBits;
+ if (!getTargetConstantBitsFromNode(Load, LoadVT.getScalarSizeInBits(),
+ UndefElts, EltBits,
+ /*AllowWholeUndefs*/ true,
+ /*AllowPartialUndefs*/ false) ||
+ UndefElts[0] || !EltBits[0].isPowerOf2())
return SDValue();
- unsigned N = First->getUniqueInteger().logBase2();
-
- for (unsigned I = 1, E = ConstVecType->getNumElements(); I < E; ++I) {
- const auto *Element = ConstVec->getAggregateElement(I);
- if (llvm::isa<llvm::UndefValue>(Element)) {
- for (unsigned J = I + 1; J != E; ++J) {
- if (!llvm::isa<llvm::UndefValue>(ConstVec->getAggregateElement(J)))
+ unsigned N = EltBits[0].logBase2();
+ unsigned Len = UndefElts.getBitWidth();
+ for (unsigned I = 1; I != Len; ++I) {
+ if (UndefElts[I]) {
+ for (unsigned J = I + 1; J != Len; ++J)
+ if (!UndefElts[J])
return SDValue();
- }
break;
}
- if (Element->getUniqueInteger() != 1 << (I + N))
+ if (EltBits[I] != 1 << (N + I))
return SDValue();
}
@@ -55352,7 +55340,6 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
? Broadcast.getOperand(0)
: Broadcast.getOperand(1);
MVT BroadcastOpVT = BroadcastOp.getSimpleValueType();
- unsigned Len = VT.getVectorNumElements();
SDValue Masked = BroadcastOp;
if (N != 0) {
unsigned Mask = (1ULL << Len) - 1;
From ca6c2468fb6e98a01f6caf02c206b2022feba6af Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Thu, 23 Jan 2025 00:26:36 +0530
Subject: [PATCH 06/17] Update test
---
llvm/test/CodeGen/X86/kmov.ll | 58 +++++++++++++++++++++++------------
1 file changed, 39 insertions(+), 19 deletions(-)
diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll
index ba39fc4d1af76..02667ba9df453 100644
--- a/llvm/test/CodeGen/X86/kmov.ll
+++ b/llvm/test/CodeGen/X86/kmov.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s
-define <16 x i1> @pr120593_16_ne(i32 %mask) {
-; CHECK-LABEL: pr120593_16_ne:
+define <16 x i1> @mask_16(i32 %mask) {
+; CHECK-LABEL: mask_16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: vpmovm2b %k0, %xmm0
@@ -16,8 +16,8 @@ entry:
ret <16 x i1> %cmp.45
}
-define <16 x i1> @pr120593_16_eq(i32 %mask) {
-; CHECK-LABEL: pr120593_16_eq:
+define <16 x i1> @invert_mask_16(i32 %mask) {
+; CHECK-LABEL: invert_mask_16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
@@ -32,8 +32,8 @@ entry:
ret <16 x i1> %cmp.45
}
-define <8 x i1> @pr120593_8_ne(i32 %mask) {
-; CHECK-LABEL: pr120593_8_ne:
+define <8 x i1> @mask_8(i32 %mask) {
+; CHECK-LABEL: mask_8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
@@ -47,8 +47,8 @@ entry:
ret <8 x i1> %cmp.45
}
-define <8 x i1> @pr120593_8_eq(i32 %mask) {
-; CHECK-LABEL: pr120593_8_eq:
+define <8 x i1> @invert_mask_8(i32 %mask) {
+; CHECK-LABEL: invert_mask_8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
@@ -63,8 +63,8 @@ entry:
ret <8 x i1> %cmp.45
}
-define void @pr120593_4_ne(ptr %c, ptr %b, i32 %mask) {
-; CHECK-LABEL: pr120593_4_ne:
+define void @mask_4(ptr %c, ptr %b, i32 %mask) {
+; CHECK-LABEL: mask_4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edx, %k1
; CHECK-NEXT: vmovups (%rsi), %xmm0 {%k1} {z}
@@ -81,8 +81,8 @@ entry:
ret void
}
-define void @pr120593_4_eq(ptr %c, ptr %b, i32 %mask) {
-; CHECK-LABEL: pr120593_4_eq:
+define void @invert_mask_4(ptr %c, ptr %b, i32 %mask) {
+; CHECK-LABEL: invert_mask_4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edx, %k0
; CHECK-NEXT: knotw %k0, %k1
@@ -100,10 +100,11 @@ entry:
ret void
}
-define <2 x i1> @pr120593_2_ne(i32 %mask) {
-; CHECK-LABEL: pr120593_2_ne:
+define <2 x i1> @mask_2(i32 %mask) {
+; CHECK-LABEL: mask_2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: vpbroadcastd %edi, %xmm0
+; CHECK-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; CHECK-NEXT: vpmovm2q %k0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -114,11 +115,11 @@ entry:
ret <2 x i1> %cmp.44
}
-define <2 x i1> @pr120593_2_eq(i32 %mask) {
-; CHECK-LABEL: pr120593_2_eq:
+define <2 x i1> @invert_mask_2(i32 %mask) {
+; CHECK-LABEL: invert_mask_2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: knotw %k0, %k0
+; CHECK-NEXT: vpbroadcastd %edi, %xmm0
+; CHECK-NEXT: vptestnmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
; CHECK-NEXT: vpmovm2q %k0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -128,3 +129,22 @@ entry:
%cmp.44 = icmp eq <2 x i32> %0, zeroinitializer
ret <2 x i1> %cmp.44
}
+
+define <16 x i1> @multi_mask_16(i32 %mask) {
+; CHECK-LABEL: multi_mask_16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: kshiftrd $16, %k0, %k1
+; CHECK-NEXT: kandw %k1, %k0, %k0
+; CHECK-NEXT: vpmovm2b %k0, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <16 x i32> poison, i32 %mask, i64 0
+ %.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
+ %1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
+ %cmp.45 = icmp ne <16 x i32> %1, zeroinitializer
+ %2 = and <16 x i32> %.splat, <i32 65536, i32 131072, i32 262144, i32 524288, i32 1048576, i32 2097152, i32 4194304, i32 8388608, i32 16777216, i32 33554432, i32 67108864, i32 134217728, i32 268435456, i32 536870912, i32 1073741824, i32 -2147483648>
+ %cmp.46 = icmp ne <16 x i32> %2, zeroinitializer
+ %result = and <16 x i1> %cmp.45, %cmp.46
+ ret <16 x i1> %result
+}
From 606d7f6eb919f915bd6aaef739e722ebf9ba6416 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Thu, 23 Jan 2025 00:27:54 +0530
Subject: [PATCH 07/17] Update X86ISelLowering.cpp
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 4d55146b3dafc..67650aa3666b0 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55480,13 +55480,12 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
unsigned Len = UndefElts.getBitWidth();
for (unsigned I = 1; I != Len; ++I) {
if (UndefElts[I]) {
- for (unsigned J = I + 1; J != Len; ++J)
- if (!UndefElts[J])
- return SDValue();
+ if (!UndefElts.lshr(I + 1).isAllOnes())
+ return SDValue();
break;
}
- if (EltBits[I] != 1 << (N + I))
+ if (EltBits[I] != 1ULL << (N + I))
return SDValue();
}
From abd6a4d3aa606718af9521d6df016efc271fd4dc Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Thu, 23 Jan 2025 12:21:55 +0530
Subject: [PATCH 08/17] Update X86ISelLowering.cpp
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 67650aa3666b0..b9631b9f54348 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55480,7 +55480,7 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
unsigned Len = UndefElts.getBitWidth();
for (unsigned I = 1; I != Len; ++I) {
if (UndefElts[I]) {
- if (!UndefElts.lshr(I + 1).isAllOnes())
+ if (!UndefElts.extractBits(Len - (I + 1), I + 1).isAllOnes())
return SDValue();
break;
}
From 1fef3b320a804c802a510c5fc83a77a47aee0cc6 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Wed, 5 Feb 2025 14:13:47 +0530
Subject: [PATCH 09/17] Address review comments
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 10 +-
llvm/test/CodeGen/X86/kmov.ll | 396 ++++++++++++++++++------
2 files changed, 316 insertions(+), 90 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b9631b9f54348..88c81e091d2d1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55447,6 +55447,9 @@ static SDValue truncateAVX512SetCCNoBWI(EVT VT, EVT OpVT, SDValue LHS,
return SDValue();
}
+// The pattern (setcc (and (broadcast x), (2^n, 2^{n+1}, ...)), (0, 0, ...),
+// eq/ne) is generated when using an integer as a mask. Instead of generating a
+// broadcast + vptest, we can directly move the integer to a mask register.
static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
const SDLoc &DL, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
@@ -55476,6 +55479,9 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
UndefElts[0] || !EltBits[0].isPowerOf2())
return SDValue();
+ // Check if the constant pool contains only powers of 2 starting from some
+ // 2^N. The table may also contain undefs because of widening of vector
+ // operands.
unsigned N = EltBits[0].logBase2();
unsigned Len = UndefElts.getBitWidth();
for (unsigned I = 1; I != Len; ++I) {
@@ -55501,9 +55507,7 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
Masked = DAG.getNode(ISD::AND, DL, BroadcastOpVT, ShiftedValue,
DAG.getConstant(Mask, DL, BroadcastOpVT));
}
- SDValue Trunc = DAG.getNode(BroadcastOpVT.bitsGT(MVT::i16) ? ISD::TRUNCATE
- : ISD::ANY_EXTEND,
- DL, MVT::i16, Masked);
+ SDValue Trunc = DAG.getAnyExtOrTrunc(Masked, DL, MVT::i16);
SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, MVT::v16i1, Trunc);
MVT PtrTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
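
When the constant table starts at 2^N rather than 2^0, the combine shifts the broadcast value right by N and keeps the low `Len` bits before the any-extend/truncate to i16 seen above. A scalar model of that `Masked` computation (a sketch, with illustrative names):

```
#include <cstdint>

// Bit i of the result corresponds to table element 2^(N + i).
uint16_t extractMaskBits(uint64_t Value, unsigned N, unsigned Len) {
  uint64_t Mask = (1ULL << Len) - 1; // Len <= 16 here, so no overflow
  return static_cast<uint16_t>((Value >> N) & Mask);
}
```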
diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll
index 02667ba9df453..0512c80d75faf 100644
--- a/llvm/test/CodeGen/X86/kmov.ll
+++ b/llvm/test/CodeGen/X86/kmov.ll
@@ -1,141 +1,187 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s
-define <16 x i1> @mask_16(i32 %mask) {
-; CHECK-LABEL: mask_16:
+define <2 x i1> @i8_mask_extract2(i8 %mask) {
+; CHECK-LABEL: i8_mask_extract2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: vpmovm2b %k0, %xmm0
+; CHECK-NEXT: vpmovm2q %k0, %xmm0
; CHECK-NEXT: retq
entry:
- %0 = and i32 %mask, 65535
- %.splatinsert = insertelement <16 x i32> poison, i32 %0, i64 0
- %.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
- %1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
- %cmp.45 = icmp ne <16 x i32> %1, zeroinitializer
- ret <16 x i1> %cmp.45
+ %.splatinsert = insertelement <2 x i8> poison, i8 %mask, i64 0
+ %.splat = shufflevector <2 x i8> %.splatinsert, <2 x i8> poison, <2 x i32> zeroinitializer
+ %1 = and <2 x i8> %.splat, <i8 1, i8 2>
+ %cmp.45 = icmp ne <2 x i8> %1, zeroinitializer
+ ret <2 x i1> %cmp.45
}
-define <16 x i1> @invert_mask_16(i32 %mask) {
-; CHECK-LABEL: invert_mask_16:
+define <2 x i1> @invert_i8_mask_extract2(i8 %mask) {
+; CHECK-LABEL: invert_i8_mask_extract2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
-; CHECK-NEXT: vpmovm2b %k0, %xmm0
+; CHECK-NEXT: vpmovm2q %k0, %xmm0
; CHECK-NEXT: retq
entry:
- %0 = and i32 %mask, 65535
- %.splatinsert = insertelement <16 x i32> poison, i32 %0, i64 0
- %.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
- %1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
- %cmp.45 = icmp eq <16 x i32> %1, zeroinitializer
- ret <16 x i1> %cmp.45
+ %.splatinsert = insertelement <2 x i8> poison, i8 %mask, i64 0
+ %.splat = shufflevector <2 x i8> %.splatinsert, <2 x i8> poison, <2 x i32> zeroinitializer
+ %1 = and <2 x i8> %.splat, <i8 1, i8 2>
+ %cmp.45 = icmp eq <2 x i8> %1, zeroinitializer
+ ret <2 x i1> %cmp.45
+}
+
+define <4 x i1> @i8_mask_extract_4(i8 %mask) {
+; CHECK-LABEL: i8_mask_extract_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: vpmovm2d %k0, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <4 x i8> poison, i8 %mask, i64 0
+ %.splat = shufflevector <4 x i8> %.splatinsert, <4 x i8> poison, <4 x i32> zeroinitializer
+ %1 = and <4 x i8> %.splat, <i8 1, i8 2, i8 4, i8 8>
+ %cmp.45 = icmp ne <4 x i8> %1, zeroinitializer
+ ret <4 x i1> %cmp.45
}
-define <8 x i1> @mask_8(i32 %mask) {
-; CHECK-LABEL: mask_8:
+define <4 x i1> @invert_i8_mask_extract_4(i8 %mask) {
+; CHECK-LABEL: invert_i8_mask_extract_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: knotw %k0, %k0
+; CHECK-NEXT: vpmovm2d %k0, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <4 x i8> poison, i8 %mask, i64 0
+ %.splat = shufflevector <4 x i8> %.splatinsert, <4 x i8> poison, <4 x i32> zeroinitializer
+ %1 = and <4 x i8> %.splat, <i8 1, i8 2, i8 4, i8 8>
+ %cmp.45 = icmp eq <4 x i8> %1, zeroinitializer
+ ret <4 x i1> %cmp.45
+}
+
+define <8 x i1> @i8_mask_extract_8(i8 %mask) {
+; CHECK-LABEL: i8_mask_extract_8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: retq
entry:
- %0 = and i32 %mask, 65535
- %.splatinsert = insertelement <8 x i32> poison, i32 %0, i64 0
- %.splat = shufflevector <8 x i32> %.splatinsert, <8 x i32> poison, <8 x i32> zeroinitializer
- %1 = and <8 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128>
- %cmp.45 = icmp ne <8 x i32> %1, zeroinitializer
+ %.splatinsert = insertelement <8 x i8> poison, i8 %mask, i64 0
+ %.splat = shufflevector <8 x i8> %.splatinsert, <8 x i8> poison, <8 x i32> zeroinitializer
+ %1 = and <8 x i8> %.splat, <i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128>
+ %cmp.45 = icmp ne <8 x i8> %1, zeroinitializer
ret <8 x i1> %cmp.45
}
-define <8 x i1> @invert_mask_8(i32 %mask) {
-; CHECK-LABEL: invert_mask_8:
+define <8 x i1> @invert_i8_mask_extract_8(i8 %mask) {
+; CHECK-LABEL: invert_i8_mask_extract_8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: retq
entry:
- %0 = and i32 %mask, 65535
- %.splatinsert = insertelement <8 x i32> poison, i32 %0, i64 0
- %.splat = shufflevector <8 x i32> %.splatinsert, <8 x i32> poison, <8 x i32> zeroinitializer
- %1 = and <8 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128>
- %cmp.45 = icmp eq <8 x i32> %1, zeroinitializer
+ %.splatinsert = insertelement <8 x i8> poison, i8 %mask, i64 0
+ %.splat = shufflevector <8 x i8> %.splatinsert, <8 x i8> poison, <8 x i32> zeroinitializer
+ %1 = and <8 x i8> %.splat, <i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128>
+ %cmp.45 = icmp eq <8 x i8> %1, zeroinitializer
ret <8 x i1> %cmp.45
}
-define void @mask_4(ptr %c, ptr %b, i32 %mask) {
-; CHECK-LABEL: mask_4:
+define <4 x i1> @i16_mask_extract_4(i16 %mask) {
+; CHECK-LABEL: i16_mask_extract_4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edx, %k1
-; CHECK-NEXT: vmovups (%rsi), %xmm0 {%k1} {z}
-; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: vpmovm2d %k0, %xmm0
; CHECK-NEXT: retq
entry:
- %0 = and i32 %mask, 65535
- %.splatinsert = insertelement <4 x i32> poison, i32 %0, i64 0
- %.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
- %1 = and <4 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8>
- %cmp.45 = icmp ne <4 x i32> %1, zeroinitializer
- %2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %b, i32 4, <4 x i1> %cmp.45, <4 x float> poison)
- tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %c, i32 4, <4 x i1> %cmp.45)
- ret void
+ %.splatinsert = insertelement <4 x i16> poison, i16 %mask, i64 0
+ %.splat = shufflevector <4 x i16> %.splatinsert, <4 x i16> poison, <4 x i32> zeroinitializer
+ %1 = and <4 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8>
+ %cmp.45 = icmp ne <4 x i16> %1, zeroinitializer
+ ret <4 x i1> %cmp.45
}
-define void @invert_mask_4(ptr %c, ptr %b, i32 %mask) {
-; CHECK-LABEL: invert_mask_4:
+define <4 x i1> @invert_i16_mask_extract_4(i16 %mask) {
+; CHECK-LABEL: invert_i16_mask_extract_4:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edx, %k0
-; CHECK-NEXT: knotw %k0, %k1
-; CHECK-NEXT: vmovups (%rsi), %xmm0 {%k1} {z}
-; CHECK-NEXT: vmovups %xmm0, (%rdi) {%k1}
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: knotw %k0, %k0
+; CHECK-NEXT: vpmovm2d %k0, %xmm0
; CHECK-NEXT: retq
entry:
- %0 = and i32 %mask, 65535
- %.splatinsert = insertelement <4 x i32> poison, i32 %0, i64 0
- %.splat = shufflevector <4 x i32> %.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
- %1 = and <4 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8>
- %cmp.45 = icmp eq <4 x i32> %1, zeroinitializer
- %2 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %b, i32 4, <4 x i1> %cmp.45, <4 x float> poison)
- tail call void @llvm.masked.store.v4f32.p0(<4 x float> %2, ptr %c, i32 4, <4 x i1> %cmp.45)
- ret void
+ %.splatinsert = insertelement <4 x i16> poison, i16 %mask, i64 0
+ %.splat = shufflevector <4 x i16> %.splatinsert, <4 x i16> poison, <4 x i32> zeroinitializer
+ %1 = and <4 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8>
+ %cmp.45 = icmp eq <4 x i16> %1, zeroinitializer
+ ret <4 x i1> %cmp.45
}
-define <2 x i1> @mask_2(i32 %mask) {
-; CHECK-LABEL: mask_2:
+define <8 x i1> @i16_mask_extract_8(i16 %mask) {
+; CHECK-LABEL: i16_mask_extract_8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vpbroadcastd %edi, %xmm0
-; CHECK-NEXT: vptestmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
-; CHECK-NEXT: vpmovm2q %k0, %xmm0
+; CHECK-NEXT: vpbroadcastw %edi, %xmm0
+; CHECK-NEXT: vpmovzxbw {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; CHECK-NEXT: vpand %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
- %.splatinsert = insertelement <2 x i32> poison, i32 %mask, i64 0
- %.splat = shufflevector <2 x i32> %.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
- %0 = and <2 x i32> %.splat, <i32 1, i32 2>
- %cmp.44 = icmp ne <2 x i32> %0, zeroinitializer
- ret <2 x i1> %cmp.44
+ %.splatinsert = insertelement <8 x i16> poison, i16 %mask, i64 0
+ %.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> poison, <8 x i32> zeroinitializer
+ %1 = and <8 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
+ %cmp.45 = icmp ne <8 x i16> %1, zeroinitializer
+ ret <8 x i1> %cmp.45
}
-define <2 x i1> @invert_mask_2(i32 %mask) {
-; CHECK-LABEL: invert_mask_2:
+define <8 x i1> @invert_i16_mask_extract_8(i16 %mask) {
+; CHECK-LABEL: invert_i16_mask_extract_8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vpbroadcastd %edi, %xmm0
-; CHECK-NEXT: vptestnmd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %k0
-; CHECK-NEXT: vpmovm2q %k0, %xmm0
+; CHECK-NEXT: vpbroadcastw %edi, %xmm0
+; CHECK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
- %.splatinsert = insertelement <2 x i32> poison, i32 %mask, i64 0
- %.splat = shufflevector <2 x i32> %.splatinsert, <2 x i32> poison, <2 x i32> zeroinitializer
- %0 = and <2 x i32> %.splat, <i32 1, i32 2>
- %cmp.44 = icmp eq <2 x i32> %0, zeroinitializer
- ret <2 x i1> %cmp.44
+ %.splatinsert = insertelement <8 x i16> poison, i16 %mask, i64 0
+ %.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> poison, <8 x i32> zeroinitializer
+ %1 = and <8 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
+ %cmp.45 = icmp eq <8 x i16> %1, zeroinitializer
+ ret <8 x i1> %cmp.45
}
-define <16 x i1> @multi_mask_16(i32 %mask) {
-; CHECK-LABEL: multi_mask_16:
+define <16 x i1> @i16_mask_extract_16(i16 %mask) {
+; CHECK-LABEL: i16_mask_extract_16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: vpmovm2b %k0, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <16 x i16> poison, i16 %mask, i64 0
+ %.splat = shufflevector <16 x i16> %.splatinsert, <16 x i16> poison, <16 x i32> zeroinitializer
+ %1 = and <16 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048, i16 4096, i16 8192, i16 16384, i16 32768>
+ %cmp.45 = icmp ne <16 x i16> %1, zeroinitializer
+ ret <16 x i1> %cmp.45
+}
+
+define <16 x i1> @invert_i16_mask_extract_16(i16 %mask) {
+; CHECK-LABEL: invert_i16_mask_extract_16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: knotw %k0, %k0
+; CHECK-NEXT: vpmovm2b %k0, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <16 x i16> poison, i16 %mask, i64 0
+ %.splat = shufflevector <16 x i16> %.splatinsert, <16 x i16> poison, <16 x i32> zeroinitializer
+ %1 = and <16 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048, i16 4096, i16 8192, i16 16384, i16 32768>
+ %cmp.45 = icmp eq <16 x i16> %1, zeroinitializer
+ ret <16 x i1> %cmp.45
+}
+
+define <16 x i1> @i32_mask_extract_16(i32 %mask) {
+; CHECK-LABEL: i32_mask_extract_16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: kshiftrd $16, %k0, %k1
-; CHECK-NEXT: kandw %k1, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -143,8 +189,184 @@ entry:
%.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
%1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
%cmp.45 = icmp ne <16 x i32> %1, zeroinitializer
- %2 = and <16 x i32> %.splat, <i32 65536, i32 131072, i32 262144, i32 524288, i32 1048576, i32 2097152, i32 4194304, i32 8388608, i32 16777216, i32 33554432, i32 67108864, i32 134217728, i32 268435456, i32 536870912, i32 1073741824, i32 -2147483648>
- %cmp.46 = icmp ne <16 x i32> %2, zeroinitializer
- %result = and <16 x i1> %cmp.45, %cmp.46
- ret <16 x i1> %result
+ ret <16 x i1> %cmp.45
}
+
+define <16 x i1> @invert_i32_mask_extract_16(i32 %mask) {
+; CHECK-LABEL: invert_i32_mask_extract_16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: knotw %k0, %k0
+; CHECK-NEXT: vpmovm2b %k0, %xmm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <16 x i32> poison, i32 %mask, i64 0
+ %.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
+ %1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
+ %cmp.45 = icmp eq <16 x i32> %1, zeroinitializer
+ ret <16 x i1> %cmp.45
+}
+
+define <32 x i1> @i32_mask_extract_32(i32 %mask) {
+; CHECK-LABEL: i32_mask_extract_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: kshiftrd $16, %k0, %k1
+; CHECK-NEXT: kunpckwd %k0, %k1, %k0
+; CHECK-NEXT: vpmovm2b %k0, %ymm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <32 x i32> poison, i32 %mask, i64 0
+ %.splat = shufflevector <32 x i32> %.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
+ %1 = and <32 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768, i32 65536, i32 131072, i32 262144, i32 524288, i32 1048576, i32 2097152, i32 4194304, i32 8388608, i32 16777216, i32 33554432, i32 67108864, i32 134217728, i32 268435456, i32 536870912, i32 1073741824, i32 2147483648>
+ %cmp.45 = icmp ne <32 x i32> %1, zeroinitializer
+ ret <32 x i1> %cmp.45
+}
+
+define <32 x i1> @invert_i32_mask_extract_32(i32 %mask) {
+; CHECK-LABEL: invert_i32_mask_extract_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovd %edi, %k0
+; CHECK-NEXT: kshiftrd $16, %k0, %k1
+; CHECK-NEXT: kunpckwd %k0, %k1, %k0
+; CHECK-NEXT: vpmovm2b %k0, %ymm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <32 x i32> poison, i32 %mask, i64 0
+ %.splat = shufflevector <32 x i32> %.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
+ %1 = and <32 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768, i32 65536, i32 131072, i32 262144, i32 524288, i32 1048576, i32 2097152, i32 4194304, i32 8388608, i32 16777216, i32 33554432, i32 67108864, i32 134217728, i32 268435456, i32 536870912, i32 1073741824, i32 2147483648>
+ %cmp.45 = icmp ne <32 x i32> %1, zeroinitializer
+ ret <32 x i1> %cmp.45
+}
+
+define <32 x i1> @i64_mask_extract_32(i64 %mask) {
+; CHECK-LABEL: i64_mask_extract_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: kmovd %eax, %k0
+; CHECK-NEXT: movzbl %ah, %ecx
+; CHECK-NEXT: kmovd %ecx, %k1
+; CHECK-NEXT: kunpckbw %k0, %k1, %k0
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shrl $24, %ecx
+; CHECK-NEXT: kmovd %ecx, %k1
+; CHECK-NEXT: shrl $16, %eax
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: kmovd %eax, %k2
+; CHECK-NEXT: kunpckbw %k2, %k1, %k1
+; CHECK-NEXT: kunpckwd %k0, %k1, %k0
+; CHECK-NEXT: vpmovm2b %k0, %ymm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <32 x i64> poison, i64 %mask, i64 0
+ %.splat = shufflevector <32 x i64> %.splatinsert, <32 x i64> poison, <32 x i32> zeroinitializer
+ %1 = and <32 x i64> %.splat, <i64 1, i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256, i64 512, i64 1024, i64 2048, i64 4096, i64 8192, i64 16384, i64 32768, i64 65536, i64 131072, i64 262144, i64 524288, i64 1048576, i64 2097152, i64 4194304, i64 8388608, i64 16777216, i64 33554432, i64 67108864, i64 134217728, i64 268435456, i64 536870912, i64 1073741824, i64 2147483648>
+ %cmp.45 = icmp ne <32 x i64> %1, zeroinitializer
+ ret <32 x i1> %cmp.45
+}
+
+define <32 x i1> @invert_i64_mask_extract_32(i64 %mask) {
+; CHECK-LABEL: invert_i64_mask_extract_32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovq %rdi, %k0
+; CHECK-NEXT: knotb %k0, %k1
+; CHECK-NEXT: kshiftrd $8, %k0, %k2
+; CHECK-NEXT: knotb %k2, %k2
+; CHECK-NEXT: kunpckbw %k1, %k2, %k1
+; CHECK-NEXT: kshiftrd $16, %k0, %k2
+; CHECK-NEXT: knotb %k2, %k2
+; CHECK-NEXT: kshiftrd $24, %k0, %k0
+; CHECK-NEXT: knotb %k0, %k0
+; CHECK-NEXT: kunpckbw %k2, %k0, %k0
+; CHECK-NEXT: kunpckwd %k1, %k0, %k0
+; CHECK-NEXT: vpmovm2b %k0, %ymm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <32 x i64> poison, i64 %mask, i64 0
+ %.splat = shufflevector <32 x i64> %.splatinsert, <32 x i64> poison, <32 x i32> zeroinitializer
+ %1 = and <32 x i64> %.splat, <i64 1, i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256, i64 512, i64 1024, i64 2048, i64 4096, i64 8192, i64 16384, i64 32768, i64 65536, i64 131072, i64 262144, i64 524288, i64 1048576, i64 2097152, i64 4194304, i64 8388608, i64 16777216, i64 33554432, i64 67108864, i64 134217728, i64 268435456, i64 536870912, i64 1073741824, i64 2147483648>
+ %cmp.45 = icmp eq <32 x i64> %1, zeroinitializer
+ ret <32 x i1> %cmp.45
+}
+
+define <64 x i1> @i64_mask_extract_64(i64 %mask) {
+; CHECK-LABEL: i64_mask_extract_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: kmovd %eax, %k0
+; CHECK-NEXT: movzbl %ah, %ecx
+; CHECK-NEXT: kmovd %ecx, %k1
+; CHECK-NEXT: kunpckbw %k0, %k1, %k0
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shrl $24, %ecx
+; CHECK-NEXT: kmovd %ecx, %k1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shrl $16, %ecx
+; CHECK-NEXT: movzbl %cl, %ecx
+; CHECK-NEXT: kmovd %ecx, %k2
+; CHECK-NEXT: kunpckbw %k2, %k1, %k1
+; CHECK-NEXT: kunpckwd %k0, %k1, %k0
+; CHECK-NEXT: movq %rdi, %rcx
+; CHECK-NEXT: shrq $32, %rcx
+; CHECK-NEXT: movzbl %cl, %ecx
+; CHECK-NEXT: kmovd %ecx, %k1
+; CHECK-NEXT: movq %rdi, %rcx
+; CHECK-NEXT: shrq $40, %rcx
+; CHECK-NEXT: movzbl %cl, %ecx
+; CHECK-NEXT: kmovd %ecx, %k2
+; CHECK-NEXT: kunpckbw %k1, %k2, %k1
+; CHECK-NEXT: movq %rdi, %rcx
+; CHECK-NEXT: shrq $56, %rcx
+; CHECK-NEXT: kmovd %ecx, %k2
+; CHECK-NEXT: shrq $48, %rax
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: kmovd %eax, %k3
+; CHECK-NEXT: kunpckbw %k3, %k2, %k2
+; CHECK-NEXT: kunpckwd %k1, %k2, %k1
+; CHECK-NEXT: kunpckdq %k0, %k1, %k0
+; CHECK-NEXT: vpmovm2b %k0, %zmm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <64 x i64> poison, i64 %mask, i64 0
+ %.splat = shufflevector <64 x i64> %.splatinsert, <64 x i64> poison, <64 x i32> zeroinitializer
+ %1 = and <64 x i64> %.splat, <i64 1, i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256, i64 512, i64 1024, i64 2048, i64 4096, i64 8192, i64 16384, i64 32768, i64 65536, i64 131072, i64 262144, i64 524288, i64 1048576, i64 2097152, i64 4194304, i64 8388608, i64 16777216, i64 33554432, i64 67108864, i64 134217728, i64 268435456, i64 536870912, i64 1073741824, i64 2147483648, i64 4294967296, i64 8589934592, i64 17179869184, i64 34359738368, i64 68719476736, i64 137438953472, i64 274877906944, i64 549755813888, i64 1099511627776, i64 2199023255552, i64 4398046511104, i64 8796093022208, i64 17592186044416, i64 35184372088832, i64 70368744177664, i64 140737488355328, i64 281474976710656, i64 562949953421312, i64 1125899906842624, i64 2251799813685248, i64 4503599627370496, i64 9007199254740992, i64 18014398509481984, i64 36028797018963968, i64 72057594037927936, i64 144115188075855872, i64 288230376151711744, i64 576460752303423488, i64 1152921504606846976, i64 2305843009213693952, i64 4611686018427387904, i64 9223372036854775808>
+ %cmp.45 = icmp ne <64 x i64> %1, zeroinitializer
+ ret <64 x i1> %cmp.45
+}
+
+define <64 x i1> @invert_i64_mask_extract_64(i64 %mask) {
+; CHECK-LABEL: invert_i64_mask_extract_64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: kmovq %rdi, %k0
+; CHECK-NEXT: kshiftrq $32, %k0, %k1
+; CHECK-NEXT: knotb %k1, %k1
+; CHECK-NEXT: kshiftrq $40, %k0, %k2
+; CHECK-NEXT: knotb %k2, %k2
+; CHECK-NEXT: kunpckbw %k1, %k2, %k1
+; CHECK-NEXT: kshiftrq $48, %k0, %k2
+; CHECK-NEXT: knotb %k2, %k2
+; CHECK-NEXT: kshiftrq $56, %k0, %k3
+; CHECK-NEXT: knotb %k3, %k3
+; CHECK-NEXT: kunpckbw %k2, %k3, %k2
+; CHECK-NEXT: kunpckwd %k1, %k2, %k1
+; CHECK-NEXT: knotb %k0, %k2
+; CHECK-NEXT: kshiftrd $8, %k0, %k3
+; CHECK-NEXT: knotb %k3, %k3
+; CHECK-NEXT: kunpckbw %k2, %k3, %k2
+; CHECK-NEXT: kshiftrd $16, %k0, %k3
+; CHECK-NEXT: knotb %k3, %k3
+; CHECK-NEXT: kshiftrd $24, %k0, %k0
+; CHECK-NEXT: knotb %k0, %k0
+; CHECK-NEXT: kunpckbw %k3, %k0, %k0
+; CHECK-NEXT: kunpckwd %k2, %k0, %k0
+; CHECK-NEXT: kunpckdq %k0, %k1, %k0
+; CHECK-NEXT: vpmovm2b %k0, %zmm0
+; CHECK-NEXT: retq
+entry:
+ %.splatinsert = insertelement <64 x i64> poison, i64 %mask, i64 0
+ %.splat = shufflevector <64 x i64> %.splatinsert, <64 x i64> poison, <64 x i32> zeroinitializer
+ %1 = and <64 x i64> %.splat, <i64 1, i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256, i64 512, i64 1024, i64 2048, i64 4096, i64 8192, i64 16384, i64 32768, i64 65536, i64 131072, i64 262144, i64 524288, i64 1048576, i64 2097152, i64 4194304, i64 8388608, i64 16777216, i64 33554432, i64 67108864, i64 134217728, i64 268435456, i64 536870912, i64 1073741824, i64 2147483648, i64 4294967296, i64 8589934592, i64 17179869184, i64 34359738368, i64 68719476736, i64 137438953472, i64 274877906944, i64 549755813888, i64 1099511627776, i64 2199023255552, i64 4398046511104, i64 8796093022208, i64 17592186044416, i64 35184372088832, i64 70368744177664, i64 140737488355328, i64 281474976710656, i64 562949953421312, i64 1125899906842624, i64 2251799813685248, i64 4503599627370496, i64 9007199254740992, i64 18014398509481984, i64 36028797018963968, i64 72057594037927936, i64 144115188075855872, i64 288230376151711744, i64 576460752303423488, i64 1152921504606846976, i64 2305843009213693952, i64 4611686018427387904, i64 9223372036854775808>
+ %cmp.45 = icmp eq <64 x i64> %1, zeroinitializer
+ ret <64 x i1> %cmp.45
+}
+
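
The `i64_mask_extract_64` expectations above build the 64-bit mask from byte-sized `kmov`s via a `kunpckbw`/`kunpckwd`/`kunpckdq` tree. A scalar model of that pairwise concatenation (illustrative; operand naming follows the Intel SRC1/SRC2 convention, where SRC2 supplies the low half):

```
#include <cstdint>

uint16_t kunpckbw(uint8_t Src1, uint8_t Src2) {
  return (uint16_t(Src1) << 8) | Src2; // 8 + 8 -> 16 bits
}
uint32_t kunpckwd(uint16_t Src1, uint16_t Src2) {
  return (uint32_t(Src1) << 16) | Src2; // 16 + 16 -> 32 bits
}
uint64_t kunpckdq(uint32_t Src1, uint32_t Src2) {
  return (uint64_t(Src1) << 32) | Src2; // 32 + 32 -> 64 bits
}
```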
>From cb5126576589d529f19c7af91766a6fd4f8663cf Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Thu, 6 Feb 2025 23:39:10 +0530
Subject: [PATCH 10/17] Address review comments
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 35 +-
llvm/test/CodeGen/X86/kmov.ll | 671 +++++++++++++++++-------
llvm/test/CodeGen/X86/pr78897.ll | 3 +-
3 files changed, 504 insertions(+), 205 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 88c81e091d2d1..2ed1dbcf70e36 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55491,14 +55491,22 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
break;
}
- if (EltBits[I] != 1ULL << (N + I))
+ if (!EltBits[I].isOneBitSet(N + I))
return SDValue();
}
- SDValue BroadcastOp = Broadcast.getOpcode() == X86ISD::VBROADCAST
- ? Broadcast.getOperand(0)
- : Broadcast.getOperand(1);
- MVT BroadcastOpVT = BroadcastOp.getSimpleValueType();
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ const DataLayout &DL = DAG.getDataLayout();
+ MVT VecIdxTy = TLI.getVectorIdxTy(DL);
+ MVT BroadcastOpVT = Broadcast.getSimpleValueType().getVectorElementType();
+ SDValue BroadcastOp;
+ if (Broadcast.getOpcode() != X86ISD::VBROADCAST) {
+ BroadcastOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, BroadcastOpVT,
+ Broadcast, DAG.getConstant(0, DL, VecIdxTy));
+ } else {
+ BroadcastOp = Broadcast.getOperand(0);
+ }
+
SDValue Masked = BroadcastOp;
if (N != 0) {
unsigned Mask = (1ULL << Len) - 1;
@@ -55507,17 +55515,17 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
Masked = DAG.getNode(ISD::AND, DL, BroadcastOpVT, ShiftedValue,
DAG.getConstant(Mask, DL, BroadcastOpVT));
}
+ // We can't extract more than 16 bits using this pattern, because 2^{16}
+ // does not fit in an i16 and a vXi32 where X > 16 is wider than 512 bits.
SDValue Trunc = DAG.getAnyExtOrTrunc(Masked, DL, MVT::i16);
SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, MVT::v16i1, Trunc);
- MVT PtrTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
+ MVT PtrTy = TLI.getPointerTy(DL);
if (CC == ISD::SETEQ)
- Bitcast = DAG.getNode(
- ISD::XOR, DL, MVT::v16i1, Bitcast,
- DAG.getSplatBuildVector(
- MVT::v16i1, DL,
- DAG.getConstant(APInt::getAllOnes(PtrTy.getSizeInBits()), DL,
- PtrTy)));
+ Bitcast =
+ DAG.getNode(ISD::XOR, DL, MVT::v16i1, Bitcast,
+ DAG.getSplatBuildVector(MVT::v16i1, DL,
+ DAG.getAllOnesConstant(DL, PtrTy)));
if (VT != MVT::v16i1)
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Bitcast,
@@ -55659,11 +55667,10 @@ static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
return Op0.getOperand(0);
}
- if (IsVZero1) {
+ if (IsVZero1)
if (SDValue V =
combineAVX512SetCCToKMOV(VT, Op0, TmpCC, DL, DAG, Subtarget))
return V;
- }
}
// Try and make unsigned vector comparison signed. On pre AVX512 targets there
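
For the SETEQ case above, `icmp eq` against zero is the complement of `icmp ne`, so the combine XORs the v16i1 value with an all-ones splat; on the mask-register side this lowers to a single `knot`, as the invert_* tests check. A scalar sketch:

```
#include <cstdint>

// Inverting the extracted mask bits models the knotw in the SETEQ tests.
uint16_t invertMask16(uint16_t Bits) { return Bits ^ 0xFFFFu; }
```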
diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll
index 0512c80d75faf..1ff3ead3a3c86 100644
--- a/llvm/test/CodeGen/X86/kmov.ll
+++ b/llvm/test/CodeGen/X86/kmov.ll
@@ -1,12 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=X64-AVX512
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=knl | FileCheck %s --check-prefixes=X64-KNL
define <2 x i1> @i8_mask_extract2(i8 %mask) {
-; CHECK-LABEL: i8_mask_extract2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: vpmovm2q %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i8_mask_extract2:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: vpmovm2q %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i8_mask_extract2:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastw {{.*#+}} xmm1 = [1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2]
+; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
+; X64-KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <2 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <2 x i8> %.splatinsert, <2 x i8> poison, <2 x i32> zeroinitializer
@@ -16,12 +27,22 @@ entry:
}
define <2 x i1> @invert_i8_mask_extract2(i8 %mask) {
-; CHECK-LABEL: invert_i8_mask_extract2:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: knotw %k0, %k0
-; CHECK-NEXT: vpmovm2q %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i8_mask_extract2:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: knotw %k0, %k0
+; X64-AVX512-NEXT: vpmovm2q %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i8_mask_extract2:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
+; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <2 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <2 x i8> %.splatinsert, <2 x i8> poison, <2 x i32> zeroinitializer
@@ -31,11 +52,21 @@ entry:
}
define <4 x i1> @i8_mask_extract_4(i8 %mask) {
-; CHECK-LABEL: i8_mask_extract_4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: vpmovm2d %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i8_mask_extract_4:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: vpmovm2d %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i8_mask_extract_4:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
+; X64-KNL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
+; X64-KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <4 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <4 x i8> %.splatinsert, <4 x i8> poison, <4 x i32> zeroinitializer
@@ -45,12 +76,22 @@ entry:
}
define <4 x i1> @invert_i8_mask_extract_4(i8 %mask) {
-; CHECK-LABEL: invert_i8_mask_extract_4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: knotw %k0, %k0
-; CHECK-NEXT: vpmovm2d %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i8_mask_extract_4:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: knotw %k0, %k0
+; X64-AVX512-NEXT: vpmovm2d %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i8_mask_extract_4:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
+; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <4 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <4 x i8> %.splatinsert, <4 x i8> poison, <4 x i32> zeroinitializer
@@ -60,11 +101,21 @@ entry:
}
define <8 x i1> @i8_mask_extract_8(i8 %mask) {
-; CHECK-LABEL: i8_mask_extract_8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: vpmovm2w %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i8_mask_extract_8:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: vpmovm2w %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i8_mask_extract_8:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
+; X64-KNL-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
+; X64-KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <8 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <8 x i8> %.splatinsert, <8 x i8> poison, <8 x i32> zeroinitializer
@@ -74,12 +125,22 @@ entry:
}
define <8 x i1> @invert_i8_mask_extract_8(i8 %mask) {
-; CHECK-LABEL: invert_i8_mask_extract_8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: knotb %k0, %k0
-; CHECK-NEXT: vpmovm2w %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i8_mask_extract_8:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: knotb %k0, %k0
+; X64-AVX512-NEXT: vpmovm2w %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i8_mask_extract_8:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
+; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <8 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <8 x i8> %.splatinsert, <8 x i8> poison, <8 x i32> zeroinitializer
@@ -89,11 +150,21 @@ entry:
}
define <4 x i1> @i16_mask_extract_4(i16 %mask) {
-; CHECK-LABEL: i16_mask_extract_4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: vpmovm2d %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i16_mask_extract_4:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: vpmovm2d %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i16_mask_extract_4:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastw %xmm0, %xmm0
+; X64-KNL-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,2,4,8,1,2,4,8]
+; X64-KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <4 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <4 x i16> %.splatinsert, <4 x i16> poison, <4 x i32> zeroinitializer
@@ -103,12 +174,22 @@ entry:
}
define <4 x i1> @invert_i16_mask_extract_4(i16 %mask) {
-; CHECK-LABEL: invert_i16_mask_extract_4:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: knotw %k0, %k0
-; CHECK-NEXT: vpmovm2d %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i16_mask_extract_4:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: knotw %k0, %k0
+; X64-AVX512-NEXT: vpmovm2d %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i16_mask_extract_4:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastw %xmm0, %xmm0
+; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-KNL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <4 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <4 x i16> %.splatinsert, <4 x i16> poison, <4 x i32> zeroinitializer
@@ -118,13 +199,22 @@ entry:
}
define <8 x i1> @i16_mask_extract_8(i16 %mask) {
-; CHECK-LABEL: i16_mask_extract_8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vpbroadcastw %edi, %xmm0
-; CHECK-NEXT: vpmovzxbw {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
-; CHECK-NEXT: vpand %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i16_mask_extract_8:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: vpbroadcastw %edi, %xmm0
+; X64-AVX512-NEXT: vpmovzxbw {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; X64-AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-AVX512-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i16_mask_extract_8:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastw %xmm0, %xmm0
+; X64-KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
+; X64-KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <8 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> poison, <8 x i32> zeroinitializer
@@ -134,13 +224,22 @@ entry:
}
define <8 x i1> @invert_i16_mask_extract_8(i16 %mask) {
-; CHECK-LABEL: invert_i16_mask_extract_8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vpbroadcastw %edi, %xmm0
-; CHECK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i16_mask_extract_8:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: vpbroadcastw %edi, %xmm0
+; X64-AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX512-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i16_mask_extract_8:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastw %xmm0, %xmm0
+; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-KNL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <8 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> poison, <8 x i32> zeroinitializer
@@ -150,11 +249,22 @@ entry:
}
define <16 x i1> @i16_mask_extract_16(i16 %mask) {
-; CHECK-LABEL: i16_mask_extract_16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: vpmovm2b %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i16_mask_extract_16:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i16_mask_extract_16:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastw %xmm0, %ymm0
+; X64-KNL-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; X64-KNL-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X64-KNL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; X64-KNL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <16 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <16 x i16> %.splatinsert, <16 x i16> poison, <16 x i32> zeroinitializer
@@ -164,12 +274,23 @@ entry:
}
define <16 x i1> @invert_i16_mask_extract_16(i16 %mask) {
-; CHECK-LABEL: invert_i16_mask_extract_16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: knotw %k0, %k0
-; CHECK-NEXT: vpmovm2b %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i16_mask_extract_16:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: knotw %k0, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i16_mask_extract_16:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: vmovd %edi, %xmm0
+; X64-KNL-NEXT: vpbroadcastw %xmm0, %ymm0
+; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; X64-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-KNL-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; X64-KNL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <16 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <16 x i16> %.splatinsert, <16 x i16> poison, <16 x i32> zeroinitializer
@@ -179,11 +300,18 @@ entry:
}
define <16 x i1> @i32_mask_extract_16(i32 %mask) {
-; CHECK-LABEL: i32_mask_extract_16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: vpmovm2b %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i32_mask_extract_16:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i32_mask_extract_16:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: kmovw %edi, %k1
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <16 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
@@ -193,12 +321,20 @@ entry:
}
define <16 x i1> @invert_i32_mask_extract_16(i32 %mask) {
-; CHECK-LABEL: invert_i32_mask_extract_16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: knotw %k0, %k0
-; CHECK-NEXT: vpmovm2b %k0, %xmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i32_mask_extract_16:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: knotw %k0, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %xmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i32_mask_extract_16:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: kmovw %edi, %k0
+; X64-KNL-NEXT: knotw %k0, %k1
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <16 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
@@ -208,13 +344,25 @@ entry:
}
define <32 x i1> @i32_mask_extract_32(i32 %mask) {
-; CHECK-LABEL: i32_mask_extract_32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: kshiftrd $16, %k0, %k1
-; CHECK-NEXT: kunpckwd %k0, %k1, %k0
-; CHECK-NEXT: vpmovm2b %k0, %ymm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i32_mask_extract_32:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: kshiftrd $16, %k0, %k1
+; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %ymm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i32_mask_extract_32:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: kmovw %edi, %k1
+; X64-KNL-NEXT: shrl $16, %edi
+; X64-KNL-NEXT: kmovw %edi, %k2
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm1 {%k2} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm1, %xmm1
+; X64-KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <32 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <32 x i32> %.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
@@ -224,13 +372,25 @@ entry:
}
define <32 x i1> @invert_i32_mask_extract_32(i32 %mask) {
-; CHECK-LABEL: invert_i32_mask_extract_32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: kshiftrd $16, %k0, %k1
-; CHECK-NEXT: kunpckwd %k0, %k1, %k0
-; CHECK-NEXT: vpmovm2b %k0, %ymm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i32_mask_extract_32:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovd %edi, %k0
+; X64-AVX512-NEXT: kshiftrd $16, %k0, %k1
+; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %ymm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i32_mask_extract_32:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: kmovw %edi, %k1
+; X64-KNL-NEXT: shrl $16, %edi
+; X64-KNL-NEXT: kmovw %edi, %k2
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm1 {%k2} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm1, %xmm1
+; X64-KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <32 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <32 x i32> %.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
@@ -240,23 +400,45 @@ entry:
}
define <32 x i1> @i64_mask_extract_32(i64 %mask) {
-; CHECK-LABEL: i64_mask_extract_32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: movzbl %ah, %ecx
-; CHECK-NEXT: kmovd %ecx, %k1
-; CHECK-NEXT: kunpckbw %k0, %k1, %k0
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: shrl $24, %ecx
-; CHECK-NEXT: kmovd %ecx, %k1
-; CHECK-NEXT: shrl $16, %eax
-; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: kmovd %eax, %k2
-; CHECK-NEXT: kunpckbw %k2, %k1, %k1
-; CHECK-NEXT: kunpckwd %k0, %k1, %k0
-; CHECK-NEXT: vpmovm2b %k0, %ymm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i64_mask_extract_32:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: movq %rdi, %rax
+; X64-AVX512-NEXT: kmovd %eax, %k0
+; X64-AVX512-NEXT: movzbl %ah, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: kunpckbw %k0, %k1, %k0
+; X64-AVX512-NEXT: movl %eax, %ecx
+; X64-AVX512-NEXT: shrl $24, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: shrl $16, %eax
+; X64-AVX512-NEXT: movzbl %al, %eax
+; X64-AVX512-NEXT: kmovd %eax, %k2
+; X64-AVX512-NEXT: kunpckbw %k2, %k1, %k1
+; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %ymm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i64_mask_extract_32:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: movq %rdi, %rax
+; X64-KNL-NEXT: movl %eax, %ecx
+; X64-KNL-NEXT: kmovw %eax, %k0
+; X64-KNL-NEXT: movzbl %ah, %edx
+; X64-KNL-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-KNL-NEXT: shrl $24, %eax
+; X64-KNL-NEXT: kmovw %eax, %k1
+; X64-KNL-NEXT: shrl $16, %ecx
+; X64-KNL-NEXT: movzbl %cl, %eax
+; X64-KNL-NEXT: kmovw %eax, %k2
+; X64-KNL-NEXT: kunpckbw %k2, %k1, %k1
+; X64-KNL-NEXT: kmovw %edx, %k2
+; X64-KNL-NEXT: kunpckbw %k0, %k2, %k2
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k2} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm1, %xmm1
+; X64-KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <32 x i64> poison, i64 %mask, i64 0
%.splat = shufflevector <32 x i64> %.splatinsert, <32 x i64> poison, <32 x i32> zeroinitializer
@@ -266,21 +448,45 @@ entry:
}
define <32 x i1> @invert_i64_mask_extract_32(i64 %mask) {
-; CHECK-LABEL: invert_i64_mask_extract_32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovq %rdi, %k0
-; CHECK-NEXT: knotb %k0, %k1
-; CHECK-NEXT: kshiftrd $8, %k0, %k2
-; CHECK-NEXT: knotb %k2, %k2
-; CHECK-NEXT: kunpckbw %k1, %k2, %k1
-; CHECK-NEXT: kshiftrd $16, %k0, %k2
-; CHECK-NEXT: knotb %k2, %k2
-; CHECK-NEXT: kshiftrd $24, %k0, %k0
-; CHECK-NEXT: knotb %k0, %k0
-; CHECK-NEXT: kunpckbw %k2, %k0, %k0
-; CHECK-NEXT: kunpckwd %k1, %k0, %k0
-; CHECK-NEXT: vpmovm2b %k0, %ymm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i64_mask_extract_32:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovq %rdi, %k0
+; X64-AVX512-NEXT: knotb %k0, %k1
+; X64-AVX512-NEXT: kshiftrd $8, %k0, %k2
+; X64-AVX512-NEXT: knotb %k2, %k2
+; X64-AVX512-NEXT: kunpckbw %k1, %k2, %k1
+; X64-AVX512-NEXT: kshiftrd $16, %k0, %k2
+; X64-AVX512-NEXT: knotb %k2, %k2
+; X64-AVX512-NEXT: kshiftrd $24, %k0, %k0
+; X64-AVX512-NEXT: knotb %k0, %k0
+; X64-AVX512-NEXT: kunpckbw %k2, %k0, %k0
+; X64-AVX512-NEXT: kunpckwd %k1, %k0, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %ymm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i64_mask_extract_32:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: movl %edi, %eax
+; X64-KNL-NEXT: shrl $16, %eax
+; X64-KNL-NEXT: kmovw %eax, %k0
+; X64-KNL-NEXT: knotw %k0, %k0
+; X64-KNL-NEXT: movl %edi, %eax
+; X64-KNL-NEXT: shrl $24, %eax
+; X64-KNL-NEXT: kmovw %eax, %k1
+; X64-KNL-NEXT: knotw %k1, %k1
+; X64-KNL-NEXT: kunpckbw %k0, %k1, %k1
+; X64-KNL-NEXT: kmovw %edi, %k0
+; X64-KNL-NEXT: knotw %k0, %k0
+; X64-KNL-NEXT: shrl $8, %edi
+; X64-KNL-NEXT: kmovw %edi, %k2
+; X64-KNL-NEXT: knotw %k2, %k2
+; X64-KNL-NEXT: kunpckbw %k0, %k2, %k2
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k2} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
+; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
+; X64-KNL-NEXT: vpmovdb %zmm1, %xmm1
+; X64-KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <32 x i64> poison, i64 %mask, i64 0
%.splat = shufflevector <32 x i64> %.splatinsert, <32 x i64> poison, <32 x i32> zeroinitializer
@@ -290,42 +496,85 @@ entry:
}
define <64 x i1> @i64_mask_extract_64(i64 %mask) {
-; CHECK-LABEL: i64_mask_extract_64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: movq %rdi, %rax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: movzbl %ah, %ecx
-; CHECK-NEXT: kmovd %ecx, %k1
-; CHECK-NEXT: kunpckbw %k0, %k1, %k0
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: shrl $24, %ecx
-; CHECK-NEXT: kmovd %ecx, %k1
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: shrl $16, %ecx
-; CHECK-NEXT: movzbl %cl, %ecx
-; CHECK-NEXT: kmovd %ecx, %k2
-; CHECK-NEXT: kunpckbw %k2, %k1, %k1
-; CHECK-NEXT: kunpckwd %k0, %k1, %k0
-; CHECK-NEXT: movq %rdi, %rcx
-; CHECK-NEXT: shrq $32, %rcx
-; CHECK-NEXT: movzbl %cl, %ecx
-; CHECK-NEXT: kmovd %ecx, %k1
-; CHECK-NEXT: movq %rdi, %rcx
-; CHECK-NEXT: shrq $40, %rcx
-; CHECK-NEXT: movzbl %cl, %ecx
-; CHECK-NEXT: kmovd %ecx, %k2
-; CHECK-NEXT: kunpckbw %k1, %k2, %k1
-; CHECK-NEXT: movq %rdi, %rcx
-; CHECK-NEXT: shrq $56, %rcx
-; CHECK-NEXT: kmovd %ecx, %k2
-; CHECK-NEXT: shrq $48, %rax
-; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: kmovd %eax, %k3
-; CHECK-NEXT: kunpckbw %k3, %k2, %k2
-; CHECK-NEXT: kunpckwd %k1, %k2, %k1
-; CHECK-NEXT: kunpckdq %k0, %k1, %k0
-; CHECK-NEXT: vpmovm2b %k0, %zmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: i64_mask_extract_64:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: movq %rdi, %rax
+; X64-AVX512-NEXT: kmovd %eax, %k0
+; X64-AVX512-NEXT: movzbl %ah, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: kunpckbw %k0, %k1, %k0
+; X64-AVX512-NEXT: movl %eax, %ecx
+; X64-AVX512-NEXT: shrl $24, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: movl %eax, %ecx
+; X64-AVX512-NEXT: shrl $16, %ecx
+; X64-AVX512-NEXT: movzbl %cl, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k2
+; X64-AVX512-NEXT: kunpckbw %k2, %k1, %k1
+; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
+; X64-AVX512-NEXT: movq %rdi, %rcx
+; X64-AVX512-NEXT: shrq $32, %rcx
+; X64-AVX512-NEXT: movzbl %cl, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: movq %rdi, %rcx
+; X64-AVX512-NEXT: shrq $40, %rcx
+; X64-AVX512-NEXT: movzbl %cl, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k2
+; X64-AVX512-NEXT: kunpckbw %k1, %k2, %k1
+; X64-AVX512-NEXT: movq %rdi, %rcx
+; X64-AVX512-NEXT: shrq $56, %rcx
+; X64-AVX512-NEXT: kmovd %ecx, %k2
+; X64-AVX512-NEXT: shrq $48, %rax
+; X64-AVX512-NEXT: movzbl %al, %eax
+; X64-AVX512-NEXT: kmovd %eax, %k3
+; X64-AVX512-NEXT: kunpckbw %k3, %k2, %k2
+; X64-AVX512-NEXT: kunpckwd %k1, %k2, %k1
+; X64-AVX512-NEXT: kunpckdq %k0, %k1, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %zmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: i64_mask_extract_64:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: pushq %rbx
+; X64-KNL-NEXT: .cfi_def_cfa_offset 16
+; X64-KNL-NEXT: .cfi_offset %rbx, -16
+; X64-KNL-NEXT: movq %rsi, %rcx
+; X64-KNL-NEXT: movq %rdi, %rax
+; X64-KNL-NEXT: movl %ecx, %edx
+; X64-KNL-NEXT: movq %rsi, %rdi
+; X64-KNL-NEXT: movq %rsi, %r8
+; X64-KNL-NEXT: movq %rsi, %r9
+; X64-KNL-NEXT: kmovw %ecx, %k0
+; X64-KNL-NEXT: movzbl %ch, %ebx
+; X64-KNL-NEXT: # kill: def $ecx killed $ecx killed $rcx
+; X64-KNL-NEXT: shrl $24, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k1
+; X64-KNL-NEXT: shrl $16, %edx
+; X64-KNL-NEXT: movzbl %dl, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k2
+; X64-KNL-NEXT: shrq $32, %rsi
+; X64-KNL-NEXT: movzbl %sil, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k3
+; X64-KNL-NEXT: shrq $40, %rdi
+; X64-KNL-NEXT: movzbl %dil, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k4
+; X64-KNL-NEXT: kunpckbw %k2, %k1, %k1
+; X64-KNL-NEXT: shrq $56, %r8
+; X64-KNL-NEXT: kmovw %r8d, %k2
+; X64-KNL-NEXT: kunpckbw %k3, %k4, %k3
+; X64-KNL-NEXT: shrq $48, %r9
+; X64-KNL-NEXT: movzbl %r9b, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k4
+; X64-KNL-NEXT: kunpckbw %k4, %k2, %k2
+; X64-KNL-NEXT: kmovw %ebx, %k4
+; X64-KNL-NEXT: kunpckbw %k0, %k4, %k0
+; X64-KNL-NEXT: kmovw %k0, (%rax)
+; X64-KNL-NEXT: kmovw %k2, 6(%rax)
+; X64-KNL-NEXT: kmovw %k3, 4(%rax)
+; X64-KNL-NEXT: kmovw %k1, 2(%rax)
+; X64-KNL-NEXT: popq %rbx
+; X64-KNL-NEXT: .cfi_def_cfa_offset 8
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <64 x i64> poison, i64 %mask, i64 0
%.splat = shufflevector <64 x i64> %.splatinsert, <64 x i64> poison, <64 x i32> zeroinitializer
@@ -335,33 +584,75 @@ entry:
}
define <64 x i1> @invert_i64_mask_extract_64(i64 %mask) {
-; CHECK-LABEL: invert_i64_mask_extract_64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: kmovq %rdi, %k0
-; CHECK-NEXT: kshiftrq $32, %k0, %k1
-; CHECK-NEXT: knotb %k1, %k1
-; CHECK-NEXT: kshiftrq $40, %k0, %k2
-; CHECK-NEXT: knotb %k2, %k2
-; CHECK-NEXT: kunpckbw %k1, %k2, %k1
-; CHECK-NEXT: kshiftrq $48, %k0, %k2
-; CHECK-NEXT: knotb %k2, %k2
-; CHECK-NEXT: kshiftrq $56, %k0, %k3
-; CHECK-NEXT: knotb %k3, %k3
-; CHECK-NEXT: kunpckbw %k2, %k3, %k2
-; CHECK-NEXT: kunpckwd %k1, %k2, %k1
-; CHECK-NEXT: knotb %k0, %k2
-; CHECK-NEXT: kshiftrd $8, %k0, %k3
-; CHECK-NEXT: knotb %k3, %k3
-; CHECK-NEXT: kunpckbw %k2, %k3, %k2
-; CHECK-NEXT: kshiftrd $16, %k0, %k3
-; CHECK-NEXT: knotb %k3, %k3
-; CHECK-NEXT: kshiftrd $24, %k0, %k0
-; CHECK-NEXT: knotb %k0, %k0
-; CHECK-NEXT: kunpckbw %k3, %k0, %k0
-; CHECK-NEXT: kunpckwd %k2, %k0, %k0
-; CHECK-NEXT: kunpckdq %k0, %k1, %k0
-; CHECK-NEXT: vpmovm2b %k0, %zmm0
-; CHECK-NEXT: retq
+; X64-AVX512-LABEL: invert_i64_mask_extract_64:
+; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512-NEXT: kmovq %rdi, %k0
+; X64-AVX512-NEXT: kshiftrq $32, %k0, %k1
+; X64-AVX512-NEXT: knotb %k1, %k1
+; X64-AVX512-NEXT: kshiftrq $40, %k0, %k2
+; X64-AVX512-NEXT: knotb %k2, %k2
+; X64-AVX512-NEXT: kunpckbw %k1, %k2, %k1
+; X64-AVX512-NEXT: kshiftrq $48, %k0, %k2
+; X64-AVX512-NEXT: knotb %k2, %k2
+; X64-AVX512-NEXT: kshiftrq $56, %k0, %k3
+; X64-AVX512-NEXT: knotb %k3, %k3
+; X64-AVX512-NEXT: kunpckbw %k2, %k3, %k2
+; X64-AVX512-NEXT: kunpckwd %k1, %k2, %k1
+; X64-AVX512-NEXT: knotb %k0, %k2
+; X64-AVX512-NEXT: kshiftrd $8, %k0, %k3
+; X64-AVX512-NEXT: knotb %k3, %k3
+; X64-AVX512-NEXT: kunpckbw %k2, %k3, %k2
+; X64-AVX512-NEXT: kshiftrd $16, %k0, %k3
+; X64-AVX512-NEXT: knotb %k3, %k3
+; X64-AVX512-NEXT: kshiftrd $24, %k0, %k0
+; X64-AVX512-NEXT: knotb %k0, %k0
+; X64-AVX512-NEXT: kunpckbw %k3, %k0, %k0
+; X64-AVX512-NEXT: kunpckwd %k2, %k0, %k0
+; X64-AVX512-NEXT: kunpckdq %k0, %k1, %k0
+; X64-AVX512-NEXT: vpmovm2b %k0, %zmm0
+; X64-AVX512-NEXT: retq
+;
+; X64-KNL-LABEL: invert_i64_mask_extract_64:
+; X64-KNL: # %bb.0: # %entry
+; X64-KNL-NEXT: movq %rdi, %rax
+; X64-KNL-NEXT: kmovw %esi, %k0
+; X64-KNL-NEXT: knotw %k0, %k0
+; X64-KNL-NEXT: movl %esi, %ecx
+; X64-KNL-NEXT: shrl $8, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k1
+; X64-KNL-NEXT: knotw %k1, %k1
+; X64-KNL-NEXT: kunpckbw %k0, %k1, %k0
+; X64-KNL-NEXT: movl %esi, %ecx
+; X64-KNL-NEXT: shrl $16, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k1
+; X64-KNL-NEXT: knotw %k1, %k1
+; X64-KNL-NEXT: movl %esi, %ecx
+; X64-KNL-NEXT: shrl $24, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k2
+; X64-KNL-NEXT: knotw %k2, %k2
+; X64-KNL-NEXT: kunpckbw %k1, %k2, %k1
+; X64-KNL-NEXT: movq %rsi, %rcx
+; X64-KNL-NEXT: shrq $32, %rcx
+; X64-KNL-NEXT: kmovw %ecx, %k2
+; X64-KNL-NEXT: knotw %k2, %k2
+; X64-KNL-NEXT: movq %rsi, %rcx
+; X64-KNL-NEXT: shrq $40, %rcx
+; X64-KNL-NEXT: kmovw %ecx, %k3
+; X64-KNL-NEXT: knotw %k3, %k3
+; X64-KNL-NEXT: kunpckbw %k2, %k3, %k2
+; X64-KNL-NEXT: movq %rsi, %rcx
+; X64-KNL-NEXT: shrq $48, %rcx
+; X64-KNL-NEXT: kmovw %ecx, %k3
+; X64-KNL-NEXT: knotw %k3, %k3
+; X64-KNL-NEXT: shrq $56, %rsi
+; X64-KNL-NEXT: kmovw %esi, %k4
+; X64-KNL-NEXT: knotw %k4, %k4
+; X64-KNL-NEXT: kunpckbw %k3, %k4, %k3
+; X64-KNL-NEXT: kmovw %k3, 6(%rdi)
+; X64-KNL-NEXT: kmovw %k2, 4(%rdi)
+; X64-KNL-NEXT: kmovw %k1, 2(%rdi)
+; X64-KNL-NEXT: kmovw %k0, (%rdi)
+; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <64 x i64> poison, i64 %mask, i64 0
%.splat = shufflevector <64 x i64> %.splatinsert, <64 x i64> poison, <64 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/X86/pr78897.ll b/llvm/test/CodeGen/X86/pr78897.ll
index c3c597f4d79de..0caa569107c0c 100644
--- a/llvm/test/CodeGen/X86/pr78897.ll
+++ b/llvm/test/CodeGen/X86/pr78897.ll
@@ -223,7 +223,8 @@ define <16 x i8> @produceShuffleVectorForByte(i8 zeroext %0) nounwind {
; X86-AVX512-NEXT: pushl %ebx
; X86-AVX512-NEXT: pushl %edi
; X86-AVX512-NEXT: pushl %esi
-; X86-AVX512-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT: vpbroadcastb {{[0-9]+}}(%esp), %xmm0
+; X86-AVX512-NEXT: vmovd %xmm0, %eax
; X86-AVX512-NEXT: kmovd %eax, %k0
; X86-AVX512-NEXT: knotw %k0, %k1
; X86-AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k1} {z} = [17,17,17,17,17,17,17,17,u,u,u,u,u,u,u,u]
>From 11f9dbb96df67eb1b2f6fe99491e7d3539738858 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Fri, 7 Feb 2025 13:02:56 +0530
Subject: [PATCH 11/17] Update X86ISelLowering.cpp
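Rename the local `DataLayout` reference from `DL` so it no longer collides with the `SDLoc DL` already used for node creation later in the function (e.g. in `DAG.getAnyExtOrTrunc(Masked, DL, MVT::i16)`). As far as the diff shows this is a pure rename with no functional change.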
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2ed1dbcf70e36..3fd80dc9c8c94 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55496,8 +55496,8 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
}
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- const DataLayout &DL = DAG.getDataLayout();
- MVT VecIdxTy = TLI.getVectorIdxTy(DL);
+ const DataLayout &DataLayout = DAG.getDataLayout();
+ MVT VecIdxTy = TLI.getVectorIdxTy(DataLayout);
MVT BroadcastOpVT = Broadcast.getSimpleValueType().getVectorElementType();
SDValue BroadcastOp;
if (Broadcast.getOpcode() != X86ISD::VBROADCAST) {
@@ -55519,7 +55519,7 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
// not fit in an i16 and a vXi32 where X > 16 is more than 512 bits.
SDValue Trunc = DAG.getAnyExtOrTrunc(Masked, DL, MVT::i16);
SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, MVT::v16i1, Trunc);
- MVT PtrTy = TLI.getPointerTy(DL);
+ MVT PtrTy = TLI.getPointerTy(DataLayout);
if (CC == ISD::SETEQ)
Bitcast =
>From bfc963bb7dfb3407c25ce8d65d15ab0bcd672d29 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Fri, 7 Feb 2025 15:52:53 +0530
Subject: [PATCH 12/17] Update X86ISelLowering.cpp
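Add two bail-outs: reject the combine when the expected set-bit position `N + I` falls outside the element's bit width before querying `isOneBitSet`, and reject it when the broadcast source operand is itself a vector. A minimal sketch, assuming LLVM's `APInt`, of why the width check has to come first:
```
#include "llvm/ADT/APInt.h"

// isOneBitSet(BitNo) asks whether exactly bit BitNo is set, which is
// only an in-range query for BitNo < getBitWidth().
bool widthGuardDemo() {
  llvm::APInt V(8, 0x80);     // 8-bit value with only bit 7 set
  bool Ok = V.isOneBitSet(7); // true: bit 7 is the single set bit
  // Asking about bit 9 of an 8-bit value is out of range, hence the
  // new `EltBits[I].getBitWidth() <= N + I` bail-out before the query.
  return Ok;
}
```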
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3fd80dc9c8c94..b9d01c609645f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55491,7 +55491,7 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
break;
}
- if (!EltBits[I].isOneBitSet(N + I))
+ if (EltBits[I].getBitWidth() <= N + I || !EltBits[I].isOneBitSet(N + I))
return SDValue();
}
@@ -55505,6 +55505,8 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
Broadcast, DAG.getConstant(0, DL, VecIdxTy));
} else {
BroadcastOp = Broadcast.getOperand(0);
+ if (BroadcastOp.getValueType().isVector())
+ return SDValue();
}
SDValue Masked = BroadcastOp;
>From e1a9e352640239e1d4f568ef4a13b7de401ce54f Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Sat, 22 Feb 2025 22:59:04 +0530
Subject: [PATCH 13/17] Address review comments
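Reject constant masks with more than 16 elements — the combine later truncates through an `i16`/`v16i1`, so wider masks cannot be represented — and build the low-bits mask as an `APInt` instead of a plain `unsigned`.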
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 4 +-
llvm/test/CodeGen/X86/kmov.ll | 136 +++++++++---------------
2 files changed, 53 insertions(+), 87 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b9d01c609645f..a2b0efe369343 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55476,7 +55476,7 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
UndefElts, EltBits,
/*AllowWholeUndefs*/ true,
/*AllowPartialUndefs*/ false) ||
- UndefElts[0] || !EltBits[0].isPowerOf2())
+ UndefElts[0] || !EltBits[0].isPowerOf2() || UndefElts.getBitWidth() > 16)
return SDValue();
// Check if the constant pool contains only powers of 2 starting from some
@@ -55511,7 +55511,7 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
SDValue Masked = BroadcastOp;
if (N != 0) {
- unsigned Mask = (1ULL << Len) - 1;
+ APInt Mask = APInt::getLowBitsSet(Len, Len);
SDValue ShiftedValue = DAG.getNode(ISD::SRL, DL, BroadcastOpVT, BroadcastOp,
DAG.getConstant(N, DL, BroadcastOpVT));
Masked = DAG.getNode(ISD::AND, DL, BroadcastOpVT, ShiftedValue,
diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll
index 1ff3ead3a3c86..c552f0ed11c8b 100644
--- a/llvm/test/CodeGen/X86/kmov.ll
+++ b/llvm/test/CodeGen/X86/kmov.ll
@@ -402,36 +402,28 @@ entry:
define <32 x i1> @i64_mask_extract_32(i64 %mask) {
; X64-AVX512-LABEL: i64_mask_extract_32:
; X64-AVX512: # %bb.0: # %entry
-; X64-AVX512-NEXT: movq %rdi, %rax
-; X64-AVX512-NEXT: kmovd %eax, %k0
-; X64-AVX512-NEXT: movzbl %ah, %ecx
-; X64-AVX512-NEXT: kmovd %ecx, %k1
-; X64-AVX512-NEXT: kunpckbw %k0, %k1, %k0
-; X64-AVX512-NEXT: movl %eax, %ecx
-; X64-AVX512-NEXT: shrl $24, %ecx
-; X64-AVX512-NEXT: kmovd %ecx, %k1
-; X64-AVX512-NEXT: shrl $16, %eax
-; X64-AVX512-NEXT: movzbl %al, %eax
-; X64-AVX512-NEXT: kmovd %eax, %k2
-; X64-AVX512-NEXT: kunpckbw %k2, %k1, %k1
-; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
+; X64-AVX512-NEXT: kmovq %rdi, %k0
+; X64-AVX512-NEXT: kshiftrd $8, %k0, %k1
+; X64-AVX512-NEXT: kunpckbw %k0, %k1, %k1
+; X64-AVX512-NEXT: kshiftrq $24, %k0, %k2
+; X64-AVX512-NEXT: kshiftrd $16, %k0, %k0
+; X64-AVX512-NEXT: kunpckbw %k0, %k2, %k0
+; X64-AVX512-NEXT: kunpckwd %k1, %k0, %k0
; X64-AVX512-NEXT: vpmovm2b %k0, %ymm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i64_mask_extract_32:
; X64-KNL: # %bb.0: # %entry
; X64-KNL-NEXT: movq %rdi, %rax
-; X64-KNL-NEXT: movl %eax, %ecx
+; X64-KNL-NEXT: shrq $24, %rax
; X64-KNL-NEXT: kmovw %eax, %k0
-; X64-KNL-NEXT: movzbl %ah, %edx
-; X64-KNL-NEXT: # kill: def $eax killed $eax killed $rax
-; X64-KNL-NEXT: shrl $24, %eax
+; X64-KNL-NEXT: movl %edi, %eax
+; X64-KNL-NEXT: shrl $16, %eax
; X64-KNL-NEXT: kmovw %eax, %k1
-; X64-KNL-NEXT: shrl $16, %ecx
-; X64-KNL-NEXT: movzbl %cl, %eax
-; X64-KNL-NEXT: kmovw %eax, %k2
-; X64-KNL-NEXT: kunpckbw %k2, %k1, %k1
-; X64-KNL-NEXT: kmovw %edx, %k2
+; X64-KNL-NEXT: kunpckbw %k1, %k0, %k1
+; X64-KNL-NEXT: kmovw %edi, %k0
+; X64-KNL-NEXT: shrl $8, %edi
+; X64-KNL-NEXT: kmovw %edi, %k2
; X64-KNL-NEXT: kunpckbw %k0, %k2, %k2
; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k2} {z} = -1
; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
@@ -498,82 +490,56 @@ entry:
define <64 x i1> @i64_mask_extract_64(i64 %mask) {
; X64-AVX512-LABEL: i64_mask_extract_64:
; X64-AVX512: # %bb.0: # %entry
-; X64-AVX512-NEXT: movq %rdi, %rax
-; X64-AVX512-NEXT: kmovd %eax, %k0
-; X64-AVX512-NEXT: movzbl %ah, %ecx
-; X64-AVX512-NEXT: kmovd %ecx, %k1
-; X64-AVX512-NEXT: kunpckbw %k0, %k1, %k0
-; X64-AVX512-NEXT: movl %eax, %ecx
-; X64-AVX512-NEXT: shrl $24, %ecx
-; X64-AVX512-NEXT: kmovd %ecx, %k1
-; X64-AVX512-NEXT: movl %eax, %ecx
-; X64-AVX512-NEXT: shrl $16, %ecx
-; X64-AVX512-NEXT: movzbl %cl, %ecx
-; X64-AVX512-NEXT: kmovd %ecx, %k2
-; X64-AVX512-NEXT: kunpckbw %k2, %k1, %k1
-; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
-; X64-AVX512-NEXT: movq %rdi, %rcx
-; X64-AVX512-NEXT: shrq $32, %rcx
-; X64-AVX512-NEXT: movzbl %cl, %ecx
-; X64-AVX512-NEXT: kmovd %ecx, %k1
-; X64-AVX512-NEXT: movq %rdi, %rcx
-; X64-AVX512-NEXT: shrq $40, %rcx
-; X64-AVX512-NEXT: movzbl %cl, %ecx
-; X64-AVX512-NEXT: kmovd %ecx, %k2
+; X64-AVX512-NEXT: kmovq %rdi, %k0
+; X64-AVX512-NEXT: kshiftrq $32, %k0, %k1
+; X64-AVX512-NEXT: kshiftrq $40, %k0, %k2
; X64-AVX512-NEXT: kunpckbw %k1, %k2, %k1
-; X64-AVX512-NEXT: movq %rdi, %rcx
-; X64-AVX512-NEXT: shrq $56, %rcx
-; X64-AVX512-NEXT: kmovd %ecx, %k2
-; X64-AVX512-NEXT: shrq $48, %rax
-; X64-AVX512-NEXT: movzbl %al, %eax
-; X64-AVX512-NEXT: kmovd %eax, %k3
-; X64-AVX512-NEXT: kunpckbw %k3, %k2, %k2
+; X64-AVX512-NEXT: kshiftrq $48, %k0, %k2
+; X64-AVX512-NEXT: kshiftrq $56, %k0, %k3
+; X64-AVX512-NEXT: kunpckbw %k2, %k3, %k2
; X64-AVX512-NEXT: kunpckwd %k1, %k2, %k1
+; X64-AVX512-NEXT: kshiftrd $8, %k0, %k2
+; X64-AVX512-NEXT: kunpckbw %k0, %k2, %k2
+; X64-AVX512-NEXT: kshiftrq $24, %k0, %k3
+; X64-AVX512-NEXT: kshiftrd $16, %k0, %k0
+; X64-AVX512-NEXT: kunpckbw %k0, %k3, %k0
+; X64-AVX512-NEXT: kunpckwd %k2, %k0, %k0
; X64-AVX512-NEXT: kunpckdq %k0, %k1, %k0
; X64-AVX512-NEXT: vpmovm2b %k0, %zmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i64_mask_extract_64:
; X64-KNL: # %bb.0: # %entry
-; X64-KNL-NEXT: pushq %rbx
-; X64-KNL-NEXT: .cfi_def_cfa_offset 16
-; X64-KNL-NEXT: .cfi_offset %rbx, -16
-; X64-KNL-NEXT: movq %rsi, %rcx
; X64-KNL-NEXT: movq %rdi, %rax
-; X64-KNL-NEXT: movl %ecx, %edx
-; X64-KNL-NEXT: movq %rsi, %rdi
-; X64-KNL-NEXT: movq %rsi, %r8
-; X64-KNL-NEXT: movq %rsi, %r9
-; X64-KNL-NEXT: kmovw %ecx, %k0
-; X64-KNL-NEXT: movzbl %ch, %ebx
-; X64-KNL-NEXT: # kill: def $ecx killed $ecx killed $rcx
-; X64-KNL-NEXT: shrl $24, %ecx
+; X64-KNL-NEXT: kmovw %esi, %k0
+; X64-KNL-NEXT: movl %esi, %ecx
+; X64-KNL-NEXT: shrl $8, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k1
+; X64-KNL-NEXT: kunpckbw %k0, %k1, %k0
+; X64-KNL-NEXT: movq %rsi, %rcx
+; X64-KNL-NEXT: shrq $24, %rcx
; X64-KNL-NEXT: kmovw %ecx, %k1
-; X64-KNL-NEXT: shrl $16, %edx
-; X64-KNL-NEXT: movzbl %dl, %ecx
+; X64-KNL-NEXT: movl %esi, %ecx
+; X64-KNL-NEXT: shrl $16, %ecx
; X64-KNL-NEXT: kmovw %ecx, %k2
-; X64-KNL-NEXT: shrq $32, %rsi
-; X64-KNL-NEXT: movzbl %sil, %ecx
-; X64-KNL-NEXT: kmovw %ecx, %k3
-; X64-KNL-NEXT: shrq $40, %rdi
-; X64-KNL-NEXT: movzbl %dil, %ecx
-; X64-KNL-NEXT: kmovw %ecx, %k4
; X64-KNL-NEXT: kunpckbw %k2, %k1, %k1
-; X64-KNL-NEXT: shrq $56, %r8
-; X64-KNL-NEXT: kmovw %r8d, %k2
+; X64-KNL-NEXT: movq %rsi, %rcx
+; X64-KNL-NEXT: shrq $32, %rcx
+; X64-KNL-NEXT: kmovw %ecx, %k2
+; X64-KNL-NEXT: movq %rsi, %rcx
+; X64-KNL-NEXT: shrq $40, %rcx
+; X64-KNL-NEXT: kmovw %ecx, %k3
+; X64-KNL-NEXT: kunpckbw %k2, %k3, %k2
+; X64-KNL-NEXT: movq %rsi, %rcx
+; X64-KNL-NEXT: shrq $48, %rcx
+; X64-KNL-NEXT: kmovw %ecx, %k3
+; X64-KNL-NEXT: shrq $56, %rsi
+; X64-KNL-NEXT: kmovw %esi, %k4
; X64-KNL-NEXT: kunpckbw %k3, %k4, %k3
-; X64-KNL-NEXT: shrq $48, %r9
-; X64-KNL-NEXT: movzbl %r9b, %ecx
-; X64-KNL-NEXT: kmovw %ecx, %k4
-; X64-KNL-NEXT: kunpckbw %k4, %k2, %k2
-; X64-KNL-NEXT: kmovw %ebx, %k4
-; X64-KNL-NEXT: kunpckbw %k0, %k4, %k0
-; X64-KNL-NEXT: kmovw %k0, (%rax)
-; X64-KNL-NEXT: kmovw %k2, 6(%rax)
-; X64-KNL-NEXT: kmovw %k3, 4(%rax)
-; X64-KNL-NEXT: kmovw %k1, 2(%rax)
-; X64-KNL-NEXT: popq %rbx
-; X64-KNL-NEXT: .cfi_def_cfa_offset 8
+; X64-KNL-NEXT: kmovw %k3, 6(%rdi)
+; X64-KNL-NEXT: kmovw %k2, 4(%rdi)
+; X64-KNL-NEXT: kmovw %k1, 2(%rdi)
+; X64-KNL-NEXT: kmovw %k0, (%rdi)
; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <64 x i64> poison, i64 %mask, i64 0
>From b76c86d3b7435f1c5866f5f3ac54959f7feb2883 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Sun, 23 Feb 2025 03:26:46 +0530
Subject: [PATCH 14/17] Fix tests
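The previous commit built the mask as `APInt::getLowBitsSet(Len, Len)`, i.e. a `Len`-bit-wide all-ones value, while the AND operand has the broadcast element type's width. Use that width so the constant is legal for `BroadcastOpVT`. A minimal sketch, assuming LLVM's `APInt`:
```
#include "llvm/ADT/APInt.h"

// The first argument of getLowBitsSet is the result's bit width, the
// second is how many low bits to set; the mask must match the width of
// the value it is AND'ed with, not the number of bits being kept.
llvm::APInt lowMaskDemo() {
  unsigned Len = 8;     // number of low bits to keep
  unsigned OpBits = 32; // bit width of the broadcast operand type
  return llvm::APInt::getLowBitsSet(OpBits, Len); // 32-bit 0x000000FF
}
```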
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 2 +-
llvm/test/CodeGen/X86/kmov.ll | 136 +++++++++++++++---------
2 files changed, 86 insertions(+), 52 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a5001991d54f2..c4bcad8b4a6b9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55752,7 +55752,7 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
SDValue Masked = BroadcastOp;
if (N != 0) {
- APInt Mask = APInt::getLowBitsSet(Len, Len);
+ APInt Mask = APInt::getLowBitsSet(BroadcastOpVT.getSizeInBits(), Len);
SDValue ShiftedValue = DAG.getNode(ISD::SRL, DL, BroadcastOpVT, BroadcastOp,
DAG.getConstant(N, DL, BroadcastOpVT));
Masked = DAG.getNode(ISD::AND, DL, BroadcastOpVT, ShiftedValue,
diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll
index c552f0ed11c8b..1ff3ead3a3c86 100644
--- a/llvm/test/CodeGen/X86/kmov.ll
+++ b/llvm/test/CodeGen/X86/kmov.ll
@@ -402,28 +402,36 @@ entry:
define <32 x i1> @i64_mask_extract_32(i64 %mask) {
; X64-AVX512-LABEL: i64_mask_extract_32:
; X64-AVX512: # %bb.0: # %entry
-; X64-AVX512-NEXT: kmovq %rdi, %k0
-; X64-AVX512-NEXT: kshiftrd $8, %k0, %k1
-; X64-AVX512-NEXT: kunpckbw %k0, %k1, %k1
-; X64-AVX512-NEXT: kshiftrq $24, %k0, %k2
-; X64-AVX512-NEXT: kshiftrd $16, %k0, %k0
-; X64-AVX512-NEXT: kunpckbw %k0, %k2, %k0
-; X64-AVX512-NEXT: kunpckwd %k1, %k0, %k0
+; X64-AVX512-NEXT: movq %rdi, %rax
+; X64-AVX512-NEXT: kmovd %eax, %k0
+; X64-AVX512-NEXT: movzbl %ah, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: kunpckbw %k0, %k1, %k0
+; X64-AVX512-NEXT: movl %eax, %ecx
+; X64-AVX512-NEXT: shrl $24, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: shrl $16, %eax
+; X64-AVX512-NEXT: movzbl %al, %eax
+; X64-AVX512-NEXT: kmovd %eax, %k2
+; X64-AVX512-NEXT: kunpckbw %k2, %k1, %k1
+; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
; X64-AVX512-NEXT: vpmovm2b %k0, %ymm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i64_mask_extract_32:
; X64-KNL: # %bb.0: # %entry
; X64-KNL-NEXT: movq %rdi, %rax
-; X64-KNL-NEXT: shrq $24, %rax
+; X64-KNL-NEXT: movl %eax, %ecx
; X64-KNL-NEXT: kmovw %eax, %k0
-; X64-KNL-NEXT: movl %edi, %eax
-; X64-KNL-NEXT: shrl $16, %eax
+; X64-KNL-NEXT: movzbl %ah, %edx
+; X64-KNL-NEXT: # kill: def $eax killed $eax killed $rax
+; X64-KNL-NEXT: shrl $24, %eax
; X64-KNL-NEXT: kmovw %eax, %k1
-; X64-KNL-NEXT: kunpckbw %k1, %k0, %k1
-; X64-KNL-NEXT: kmovw %edi, %k0
-; X64-KNL-NEXT: shrl $8, %edi
-; X64-KNL-NEXT: kmovw %edi, %k2
+; X64-KNL-NEXT: shrl $16, %ecx
+; X64-KNL-NEXT: movzbl %cl, %eax
+; X64-KNL-NEXT: kmovw %eax, %k2
+; X64-KNL-NEXT: kunpckbw %k2, %k1, %k1
+; X64-KNL-NEXT: kmovw %edx, %k2
; X64-KNL-NEXT: kunpckbw %k0, %k2, %k2
; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k2} {z} = -1
; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
@@ -490,56 +498,82 @@ entry:
define <64 x i1> @i64_mask_extract_64(i64 %mask) {
; X64-AVX512-LABEL: i64_mask_extract_64:
; X64-AVX512: # %bb.0: # %entry
-; X64-AVX512-NEXT: kmovq %rdi, %k0
-; X64-AVX512-NEXT: kshiftrq $32, %k0, %k1
-; X64-AVX512-NEXT: kshiftrq $40, %k0, %k2
+; X64-AVX512-NEXT: movq %rdi, %rax
+; X64-AVX512-NEXT: kmovd %eax, %k0
+; X64-AVX512-NEXT: movzbl %ah, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: kunpckbw %k0, %k1, %k0
+; X64-AVX512-NEXT: movl %eax, %ecx
+; X64-AVX512-NEXT: shrl $24, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: movl %eax, %ecx
+; X64-AVX512-NEXT: shrl $16, %ecx
+; X64-AVX512-NEXT: movzbl %cl, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k2
+; X64-AVX512-NEXT: kunpckbw %k2, %k1, %k1
+; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
+; X64-AVX512-NEXT: movq %rdi, %rcx
+; X64-AVX512-NEXT: shrq $32, %rcx
+; X64-AVX512-NEXT: movzbl %cl, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k1
+; X64-AVX512-NEXT: movq %rdi, %rcx
+; X64-AVX512-NEXT: shrq $40, %rcx
+; X64-AVX512-NEXT: movzbl %cl, %ecx
+; X64-AVX512-NEXT: kmovd %ecx, %k2
; X64-AVX512-NEXT: kunpckbw %k1, %k2, %k1
-; X64-AVX512-NEXT: kshiftrq $48, %k0, %k2
-; X64-AVX512-NEXT: kshiftrq $56, %k0, %k3
-; X64-AVX512-NEXT: kunpckbw %k2, %k3, %k2
+; X64-AVX512-NEXT: movq %rdi, %rcx
+; X64-AVX512-NEXT: shrq $56, %rcx
+; X64-AVX512-NEXT: kmovd %ecx, %k2
+; X64-AVX512-NEXT: shrq $48, %rax
+; X64-AVX512-NEXT: movzbl %al, %eax
+; X64-AVX512-NEXT: kmovd %eax, %k3
+; X64-AVX512-NEXT: kunpckbw %k3, %k2, %k2
; X64-AVX512-NEXT: kunpckwd %k1, %k2, %k1
-; X64-AVX512-NEXT: kshiftrd $8, %k0, %k2
-; X64-AVX512-NEXT: kunpckbw %k0, %k2, %k2
-; X64-AVX512-NEXT: kshiftrq $24, %k0, %k3
-; X64-AVX512-NEXT: kshiftrd $16, %k0, %k0
-; X64-AVX512-NEXT: kunpckbw %k0, %k3, %k0
-; X64-AVX512-NEXT: kunpckwd %k2, %k0, %k0
; X64-AVX512-NEXT: kunpckdq %k0, %k1, %k0
; X64-AVX512-NEXT: vpmovm2b %k0, %zmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i64_mask_extract_64:
; X64-KNL: # %bb.0: # %entry
-; X64-KNL-NEXT: movq %rdi, %rax
-; X64-KNL-NEXT: kmovw %esi, %k0
-; X64-KNL-NEXT: movl %esi, %ecx
-; X64-KNL-NEXT: shrl $8, %ecx
-; X64-KNL-NEXT: kmovw %ecx, %k1
-; X64-KNL-NEXT: kunpckbw %k0, %k1, %k0
+; X64-KNL-NEXT: pushq %rbx
+; X64-KNL-NEXT: .cfi_def_cfa_offset 16
+; X64-KNL-NEXT: .cfi_offset %rbx, -16
; X64-KNL-NEXT: movq %rsi, %rcx
-; X64-KNL-NEXT: shrq $24, %rcx
+; X64-KNL-NEXT: movq %rdi, %rax
+; X64-KNL-NEXT: movl %ecx, %edx
+; X64-KNL-NEXT: movq %rsi, %rdi
+; X64-KNL-NEXT: movq %rsi, %r8
+; X64-KNL-NEXT: movq %rsi, %r9
+; X64-KNL-NEXT: kmovw %ecx, %k0
+; X64-KNL-NEXT: movzbl %ch, %ebx
+; X64-KNL-NEXT: # kill: def $ecx killed $ecx killed $rcx
+; X64-KNL-NEXT: shrl $24, %ecx
; X64-KNL-NEXT: kmovw %ecx, %k1
-; X64-KNL-NEXT: movl %esi, %ecx
-; X64-KNL-NEXT: shrl $16, %ecx
+; X64-KNL-NEXT: shrl $16, %edx
+; X64-KNL-NEXT: movzbl %dl, %ecx
; X64-KNL-NEXT: kmovw %ecx, %k2
-; X64-KNL-NEXT: kunpckbw %k2, %k1, %k1
-; X64-KNL-NEXT: movq %rsi, %rcx
-; X64-KNL-NEXT: shrq $32, %rcx
-; X64-KNL-NEXT: kmovw %ecx, %k2
-; X64-KNL-NEXT: movq %rsi, %rcx
-; X64-KNL-NEXT: shrq $40, %rcx
+; X64-KNL-NEXT: shrq $32, %rsi
+; X64-KNL-NEXT: movzbl %sil, %ecx
; X64-KNL-NEXT: kmovw %ecx, %k3
-; X64-KNL-NEXT: kunpckbw %k2, %k3, %k2
-; X64-KNL-NEXT: movq %rsi, %rcx
-; X64-KNL-NEXT: shrq $48, %rcx
-; X64-KNL-NEXT: kmovw %ecx, %k3
-; X64-KNL-NEXT: shrq $56, %rsi
-; X64-KNL-NEXT: kmovw %esi, %k4
+; X64-KNL-NEXT: shrq $40, %rdi
+; X64-KNL-NEXT: movzbl %dil, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k4
+; X64-KNL-NEXT: kunpckbw %k2, %k1, %k1
+; X64-KNL-NEXT: shrq $56, %r8
+; X64-KNL-NEXT: kmovw %r8d, %k2
; X64-KNL-NEXT: kunpckbw %k3, %k4, %k3
-; X64-KNL-NEXT: kmovw %k3, 6(%rdi)
-; X64-KNL-NEXT: kmovw %k2, 4(%rdi)
-; X64-KNL-NEXT: kmovw %k1, 2(%rdi)
-; X64-KNL-NEXT: kmovw %k0, (%rdi)
+; X64-KNL-NEXT: shrq $48, %r9
+; X64-KNL-NEXT: movzbl %r9b, %ecx
+; X64-KNL-NEXT: kmovw %ecx, %k4
+; X64-KNL-NEXT: kunpckbw %k4, %k2, %k2
+; X64-KNL-NEXT: kmovw %ebx, %k4
+; X64-KNL-NEXT: kunpckbw %k0, %k4, %k0
+; X64-KNL-NEXT: kmovw %k0, (%rax)
+; X64-KNL-NEXT: kmovw %k2, 6(%rax)
+; X64-KNL-NEXT: kmovw %k3, 4(%rax)
+; X64-KNL-NEXT: kmovw %k1, 2(%rax)
+; X64-KNL-NEXT: popq %rbx
+; X64-KNL-NEXT: .cfi_def_cfa_offset 8
; X64-KNL-NEXT: retq
entry:
%.splatinsert = insertelement <64 x i64> poison, i64 %mask, i64 0
>From 0feb0a1ef7d6cc2469a24252a107f88333f219c1 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Tue, 25 Feb 2025 13:42:19 +0530
Subject: [PATCH 15/17] Remove basic block name from tests
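With the `entry:` label dropped, the autogenerated check lines (presumably from update_llc_test_checks.py) no longer carry the `# %entry` suffix on each `# %bb.0:` line, trimming noise from the test file.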
---
llvm/test/CodeGen/X86/kmov.ll | 100 ++++++++++++++--------------------
1 file changed, 40 insertions(+), 60 deletions(-)
diff --git a/llvm/test/CodeGen/X86/kmov.ll b/llvm/test/CodeGen/X86/kmov.ll
index 1ff3ead3a3c86..55fb2527722a4 100644
--- a/llvm/test/CodeGen/X86/kmov.ll
+++ b/llvm/test/CodeGen/X86/kmov.ll
@@ -4,13 +4,13 @@
define <2 x i1> @i8_mask_extract2(i8 %mask) {
; X64-AVX512-LABEL: i8_mask_extract2:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: vpmovm2q %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i8_mask_extract2:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastw {{.*#+}} xmm1 = [1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2]
; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
@@ -18,7 +18,6 @@ define <2 x i1> @i8_mask_extract2(i8 %mask) {
; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <2 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <2 x i8> %.splatinsert, <2 x i8> poison, <2 x i32> zeroinitializer
%1 = and <2 x i8> %.splat, <i8 1, i8 2>
@@ -28,14 +27,14 @@ entry:
define <2 x i1> @invert_i8_mask_extract2(i8 %mask) {
; X64-AVX512-LABEL: invert_i8_mask_extract2:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: knotw %k0, %k0
; X64-AVX512-NEXT: vpmovm2q %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i8_mask_extract2:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -43,7 +42,6 @@ define <2 x i1> @invert_i8_mask_extract2(i8 %mask) {
; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <2 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <2 x i8> %.splatinsert, <2 x i8> poison, <2 x i32> zeroinitializer
%1 = and <2 x i8> %.splat, <i8 1, i8 2>
@@ -53,13 +51,13 @@ entry:
define <4 x i1> @i8_mask_extract_4(i8 %mask) {
; X64-AVX512-LABEL: i8_mask_extract_4:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: vpmovm2d %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i8_mask_extract_4:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-KNL-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
@@ -67,7 +65,6 @@ define <4 x i1> @i8_mask_extract_4(i8 %mask) {
; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <4 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <4 x i8> %.splatinsert, <4 x i8> poison, <4 x i32> zeroinitializer
%1 = and <4 x i8> %.splat, <i8 1, i8 2, i8 4, i8 8>
@@ -77,14 +74,14 @@ entry:
define <4 x i1> @invert_i8_mask_extract_4(i8 %mask) {
; X64-AVX512-LABEL: invert_i8_mask_extract_4:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: knotw %k0, %k0
; X64-AVX512-NEXT: vpmovm2d %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i8_mask_extract_4:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -92,7 +89,6 @@ define <4 x i1> @invert_i8_mask_extract_4(i8 %mask) {
; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <4 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <4 x i8> %.splatinsert, <4 x i8> poison, <4 x i32> zeroinitializer
%1 = and <4 x i8> %.splat, <i8 1, i8 2, i8 4, i8 8>
@@ -102,13 +98,13 @@ entry:
define <8 x i1> @i8_mask_extract_8(i8 %mask) {
; X64-AVX512-LABEL: i8_mask_extract_8:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: vpmovm2w %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i8_mask_extract_8:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-KNL-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
@@ -116,7 +112,6 @@ define <8 x i1> @i8_mask_extract_8(i8 %mask) {
; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <8 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <8 x i8> %.splatinsert, <8 x i8> poison, <8 x i32> zeroinitializer
%1 = and <8 x i8> %.splat, <i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128>
@@ -126,14 +121,14 @@ entry:
define <8 x i1> @invert_i8_mask_extract_8(i8 %mask) {
; X64-AVX512-LABEL: invert_i8_mask_extract_8:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: knotb %k0, %k0
; X64-AVX512-NEXT: vpmovm2w %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i8_mask_extract_8:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastb %xmm0, %xmm0
; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -141,7 +136,6 @@ define <8 x i1> @invert_i8_mask_extract_8(i8 %mask) {
; X64-KNL-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <8 x i8> poison, i8 %mask, i64 0
%.splat = shufflevector <8 x i8> %.splatinsert, <8 x i8> poison, <8 x i32> zeroinitializer
%1 = and <8 x i8> %.splat, <i8 1, i8 2, i8 4, i8 8, i8 16, i8 32, i8 64, i8 128>
@@ -151,13 +145,13 @@ entry:
define <4 x i1> @i16_mask_extract_4(i16 %mask) {
; X64-AVX512-LABEL: i16_mask_extract_4:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: vpmovm2d %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i16_mask_extract_4:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastw %xmm0, %xmm0
; X64-KNL-NEXT: vpbroadcastq {{.*#+}} xmm1 = [1,2,4,8,1,2,4,8]
@@ -165,7 +159,6 @@ define <4 x i1> @i16_mask_extract_4(i16 %mask) {
; X64-KNL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <4 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <4 x i16> %.splatinsert, <4 x i16> poison, <4 x i32> zeroinitializer
%1 = and <4 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8>
@@ -175,14 +168,14 @@ entry:
define <4 x i1> @invert_i16_mask_extract_4(i16 %mask) {
; X64-AVX512-LABEL: invert_i16_mask_extract_4:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: knotw %k0, %k0
; X64-AVX512-NEXT: vpmovm2d %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i16_mask_extract_4:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastw %xmm0, %xmm0
; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
@@ -190,7 +183,6 @@ define <4 x i1> @invert_i16_mask_extract_4(i16 %mask) {
; X64-KNL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <4 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <4 x i16> %.splatinsert, <4 x i16> poison, <4 x i32> zeroinitializer
%1 = and <4 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8>
@@ -200,7 +192,7 @@ entry:
define <8 x i1> @i16_mask_extract_8(i16 %mask) {
; X64-AVX512-LABEL: i16_mask_extract_8:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vpbroadcastw %edi, %xmm0
; X64-AVX512-NEXT: vpmovzxbw {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; X64-AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -208,14 +200,13 @@ define <8 x i1> @i16_mask_extract_8(i16 %mask) {
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i16_mask_extract_8:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastw %xmm0, %xmm0
; X64-KNL-NEXT: vpmovzxbw {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
; X64-KNL-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <8 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> poison, <8 x i32> zeroinitializer
%1 = and <8 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
@@ -225,7 +216,7 @@ entry:
define <8 x i1> @invert_i16_mask_extract_8(i16 %mask) {
; X64-AVX512-LABEL: invert_i16_mask_extract_8:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vpbroadcastw %edi, %xmm0
; X64-AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -233,14 +224,13 @@ define <8 x i1> @invert_i16_mask_extract_8(i16 %mask) {
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i16_mask_extract_8:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastw %xmm0, %xmm0
; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-KNL-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <8 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <8 x i16> %.splatinsert, <8 x i16> poison, <8 x i32> zeroinitializer
%1 = and <8 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
@@ -250,13 +240,13 @@ entry:
define <16 x i1> @i16_mask_extract_16(i16 %mask) {
; X64-AVX512-LABEL: i16_mask_extract_16:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: vpmovm2b %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i16_mask_extract_16:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-KNL-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
@@ -265,7 +255,6 @@ define <16 x i1> @i16_mask_extract_16(i16 %mask) {
; X64-KNL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <16 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <16 x i16> %.splatinsert, <16 x i16> poison, <16 x i32> zeroinitializer
%1 = and <16 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048, i16 4096, i16 8192, i16 16384, i16 32768>
@@ -275,14 +264,14 @@ entry:
define <16 x i1> @invert_i16_mask_extract_16(i16 %mask) {
; X64-AVX512-LABEL: invert_i16_mask_extract_16:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: knotw %k0, %k0
; X64-AVX512-NEXT: vpmovm2b %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i16_mask_extract_16:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: vmovd %edi, %xmm0
; X64-KNL-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-KNL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
@@ -291,7 +280,6 @@ define <16 x i1> @invert_i16_mask_extract_16(i16 %mask) {
; X64-KNL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <16 x i16> poison, i16 %mask, i64 0
%.splat = shufflevector <16 x i16> %.splatinsert, <16 x i16> poison, <16 x i32> zeroinitializer
%1 = and <16 x i16> %.splat, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048, i16 4096, i16 8192, i16 16384, i16 32768>
@@ -301,18 +289,17 @@ entry:
define <16 x i1> @i32_mask_extract_16(i32 %mask) {
; X64-AVX512-LABEL: i32_mask_extract_16:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: vpmovm2b %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i32_mask_extract_16:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: kmovw %edi, %k1
; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <16 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
%1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
@@ -322,20 +309,19 @@ entry:
define <16 x i1> @invert_i32_mask_extract_16(i32 %mask) {
; X64-AVX512-LABEL: invert_i32_mask_extract_16:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: knotw %k0, %k0
; X64-AVX512-NEXT: vpmovm2b %k0, %xmm0
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i32_mask_extract_16:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: kmovw %edi, %k0
; X64-KNL-NEXT: knotw %k0, %k1
; X64-KNL-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; X64-KNL-NEXT: vpmovdb %zmm0, %xmm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <16 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <16 x i32> %.splatinsert, <16 x i32> poison, <16 x i32> zeroinitializer
%1 = and <16 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768>
@@ -345,7 +331,7 @@ entry:
define <32 x i1> @i32_mask_extract_32(i32 %mask) {
; X64-AVX512-LABEL: i32_mask_extract_32:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: kshiftrd $16, %k0, %k1
; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
@@ -353,7 +339,7 @@ define <32 x i1> @i32_mask_extract_32(i32 %mask) {
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i32_mask_extract_32:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: kmovw %edi, %k1
; X64-KNL-NEXT: shrl $16, %edi
; X64-KNL-NEXT: kmovw %edi, %k2
@@ -363,7 +349,6 @@ define <32 x i1> @i32_mask_extract_32(i32 %mask) {
; X64-KNL-NEXT: vpmovdb %zmm1, %xmm1
; X64-KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <32 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <32 x i32> %.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
%1 = and <32 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768, i32 65536, i32 131072, i32 262144, i32 524288, i32 1048576, i32 2097152, i32 4194304, i32 8388608, i32 16777216, i32 33554432, i32 67108864, i32 134217728, i32 268435456, i32 536870912, i32 1073741824, i32 2147483648>
@@ -373,7 +358,7 @@ entry:
define <32 x i1> @invert_i32_mask_extract_32(i32 %mask) {
; X64-AVX512-LABEL: invert_i32_mask_extract_32:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovd %edi, %k0
; X64-AVX512-NEXT: kshiftrd $16, %k0, %k1
; X64-AVX512-NEXT: kunpckwd %k0, %k1, %k0
@@ -381,7 +366,7 @@ define <32 x i1> @invert_i32_mask_extract_32(i32 %mask) {
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i32_mask_extract_32:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: kmovw %edi, %k1
; X64-KNL-NEXT: shrl $16, %edi
; X64-KNL-NEXT: kmovw %edi, %k2
@@ -391,7 +376,6 @@ define <32 x i1> @invert_i32_mask_extract_32(i32 %mask) {
; X64-KNL-NEXT: vpmovdb %zmm1, %xmm1
; X64-KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <32 x i32> poison, i32 %mask, i64 0
%.splat = shufflevector <32 x i32> %.splatinsert, <32 x i32> poison, <32 x i32> zeroinitializer
%1 = and <32 x i32> %.splat, <i32 1, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 512, i32 1024, i32 2048, i32 4096, i32 8192, i32 16384, i32 32768, i32 65536, i32 131072, i32 262144, i32 524288, i32 1048576, i32 2097152, i32 4194304, i32 8388608, i32 16777216, i32 33554432, i32 67108864, i32 134217728, i32 268435456, i32 536870912, i32 1073741824, i32 2147483648>
@@ -401,7 +385,7 @@ entry:
define <32 x i1> @i64_mask_extract_32(i64 %mask) {
; X64-AVX512-LABEL: i64_mask_extract_32:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: movq %rdi, %rax
; X64-AVX512-NEXT: kmovd %eax, %k0
; X64-AVX512-NEXT: movzbl %ah, %ecx
@@ -419,7 +403,7 @@ define <32 x i1> @i64_mask_extract_32(i64 %mask) {
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i64_mask_extract_32:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: movq %rdi, %rax
; X64-KNL-NEXT: movl %eax, %ecx
; X64-KNL-NEXT: kmovw %eax, %k0
@@ -439,7 +423,6 @@ define <32 x i1> @i64_mask_extract_32(i64 %mask) {
; X64-KNL-NEXT: vpmovdb %zmm1, %xmm1
; X64-KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <32 x i64> poison, i64 %mask, i64 0
%.splat = shufflevector <32 x i64> %.splatinsert, <32 x i64> poison, <32 x i32> zeroinitializer
%1 = and <32 x i64> %.splat, <i64 1, i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256, i64 512, i64 1024, i64 2048, i64 4096, i64 8192, i64 16384, i64 32768, i64 65536, i64 131072, i64 262144, i64 524288, i64 1048576, i64 2097152, i64 4194304, i64 8388608, i64 16777216, i64 33554432, i64 67108864, i64 134217728, i64 268435456, i64 536870912, i64 1073741824, i64 2147483648>
@@ -449,7 +432,7 @@ entry:
define <32 x i1> @invert_i64_mask_extract_32(i64 %mask) {
; X64-AVX512-LABEL: invert_i64_mask_extract_32:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovq %rdi, %k0
; X64-AVX512-NEXT: knotb %k0, %k1
; X64-AVX512-NEXT: kshiftrd $8, %k0, %k2
@@ -465,7 +448,7 @@ define <32 x i1> @invert_i64_mask_extract_32(i64 %mask) {
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i64_mask_extract_32:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: movl %edi, %eax
; X64-KNL-NEXT: shrl $16, %eax
; X64-KNL-NEXT: kmovw %eax, %k0
@@ -487,7 +470,6 @@ define <32 x i1> @invert_i64_mask_extract_32(i64 %mask) {
; X64-KNL-NEXT: vpmovdb %zmm1, %xmm1
; X64-KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <32 x i64> poison, i64 %mask, i64 0
%.splat = shufflevector <32 x i64> %.splatinsert, <32 x i64> poison, <32 x i32> zeroinitializer
%1 = and <32 x i64> %.splat, <i64 1, i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256, i64 512, i64 1024, i64 2048, i64 4096, i64 8192, i64 16384, i64 32768, i64 65536, i64 131072, i64 262144, i64 524288, i64 1048576, i64 2097152, i64 4194304, i64 8388608, i64 16777216, i64 33554432, i64 67108864, i64 134217728, i64 268435456, i64 536870912, i64 1073741824, i64 2147483648>
@@ -497,7 +479,7 @@ entry:
define <64 x i1> @i64_mask_extract_64(i64 %mask) {
; X64-AVX512-LABEL: i64_mask_extract_64:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: movq %rdi, %rax
; X64-AVX512-NEXT: kmovd %eax, %k0
; X64-AVX512-NEXT: movzbl %ah, %ecx
@@ -534,7 +516,7 @@ define <64 x i1> @i64_mask_extract_64(i64 %mask) {
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: i64_mask_extract_64:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: pushq %rbx
; X64-KNL-NEXT: .cfi_def_cfa_offset 16
; X64-KNL-NEXT: .cfi_offset %rbx, -16
@@ -575,7 +557,6 @@ define <64 x i1> @i64_mask_extract_64(i64 %mask) {
; X64-KNL-NEXT: popq %rbx
; X64-KNL-NEXT: .cfi_def_cfa_offset 8
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <64 x i64> poison, i64 %mask, i64 0
%.splat = shufflevector <64 x i64> %.splatinsert, <64 x i64> poison, <64 x i32> zeroinitializer
%1 = and <64 x i64> %.splat, <i64 1, i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256, i64 512, i64 1024, i64 2048, i64 4096, i64 8192, i64 16384, i64 32768, i64 65536, i64 131072, i64 262144, i64 524288, i64 1048576, i64 2097152, i64 4194304, i64 8388608, i64 16777216, i64 33554432, i64 67108864, i64 134217728, i64 268435456, i64 536870912, i64 1073741824, i64 2147483648, i64 4294967296, i64 8589934592, i64 17179869184, i64 34359738368, i64 68719476736, i64 137438953472, i64 274877906944, i64 549755813888, i64 1099511627776, i64 2199023255552, i64 4398046511104, i64 8796093022208, i64 17592186044416, i64 35184372088832, i64 70368744177664, i64 140737488355328, i64 281474976710656, i64 562949953421312, i64 1125899906842624, i64 2251799813685248, i64 4503599627370496, i64 9007199254740992, i64 18014398509481984, i64 36028797018963968, i64 72057594037927936, i64 144115188075855872, i64 288230376151711744, i64 576460752303423488, i64 1152921504606846976, i64 2305843009213693952, i64 4611686018427387904, i64 9223372036854775808>
@@ -585,7 +566,7 @@ entry:
define <64 x i1> @invert_i64_mask_extract_64(i64 %mask) {
; X64-AVX512-LABEL: invert_i64_mask_extract_64:
-; X64-AVX512: # %bb.0: # %entry
+; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: kmovq %rdi, %k0
; X64-AVX512-NEXT: kshiftrq $32, %k0, %k1
; X64-AVX512-NEXT: knotb %k1, %k1
@@ -613,7 +594,7 @@ define <64 x i1> @invert_i64_mask_extract_64(i64 %mask) {
; X64-AVX512-NEXT: retq
;
; X64-KNL-LABEL: invert_i64_mask_extract_64:
-; X64-KNL: # %bb.0: # %entry
+; X64-KNL: # %bb.0:
; X64-KNL-NEXT: movq %rdi, %rax
; X64-KNL-NEXT: kmovw %esi, %k0
; X64-KNL-NEXT: knotw %k0, %k0
@@ -653,7 +634,6 @@ define <64 x i1> @invert_i64_mask_extract_64(i64 %mask) {
; X64-KNL-NEXT: kmovw %k1, 2(%rdi)
; X64-KNL-NEXT: kmovw %k0, (%rdi)
; X64-KNL-NEXT: retq
-entry:
%.splatinsert = insertelement <64 x i64> poison, i64 %mask, i64 0
%.splat = shufflevector <64 x i64> %.splatinsert, <64 x i64> poison, <64 x i32> zeroinitializer
%1 = and <64 x i64> %.splat, <i64 1, i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256, i64 512, i64 1024, i64 2048, i64 4096, i64 8192, i64 16384, i64 32768, i64 65536, i64 131072, i64 262144, i64 524288, i64 1048576, i64 2097152, i64 4194304, i64 8388608, i64 16777216, i64 33554432, i64 67108864, i64 134217728, i64 268435456, i64 536870912, i64 1073741824, i64 2147483648, i64 4294967296, i64 8589934592, i64 17179869184, i64 34359738368, i64 68719476736, i64 137438953472, i64 274877906944, i64 549755813888, i64 1099511627776, i64 2199023255552, i64 4398046511104, i64 8796093022208, i64 17592186044416, i64 35184372088832, i64 70368744177664, i64 140737488355328, i64 281474976710656, i64 562949953421312, i64 1125899906842624, i64 2251799813685248, i64 4503599627370496, i64 9007199254740992, i64 18014398509481984, i64 36028797018963968, i64 72057594037927936, i64 144115188075855872, i64 288230376151711744, i64 576460752303423488, i64 1152921504606846976, i64 2305843009213693952, i64 4611686018427387904, i64 9223372036854775808>
>From 250fc9c09cda20b02c8c515e2cb52aea4dfb1e72 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Tue, 25 Feb 2025 13:45:18 +0530
Subject: [PATCH 16/17] Use getVectorIdxConstant instead of getConstant
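`getVectorIdxConstant` computes the target's vector-index type internally, so the explicit `TLI.getVectorIdxTy(DataLayout)` plumbing can be dropped at the first use site. A minimal sketch, assuming SelectionDAG's public helpers:
```
#include "llvm/CodeGen/SelectionDAG.h"

// Both forms build the same zero index; the helper picks the
// vector-index type itself.
llvm::SDValue zeroIdxDemo(llvm::SelectionDAG &DAG, const llvm::SDLoc &DL) {
  // Before: DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout()));
  return DAG.getVectorIdxConstant(0, DL);
}
```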
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c4bcad8b4a6b9..c20f55807fd90 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55736,14 +55736,11 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
return SDValue();
}
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- const DataLayout &DataLayout = DAG.getDataLayout();
- MVT VecIdxTy = TLI.getVectorIdxTy(DataLayout);
MVT BroadcastOpVT = Broadcast.getSimpleValueType().getVectorElementType();
SDValue BroadcastOp;
if (Broadcast.getOpcode() != X86ISD::VBROADCAST) {
BroadcastOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, BroadcastOpVT,
- Broadcast, DAG.getConstant(0, DL, VecIdxTy));
+ Broadcast, DAG.getVectorIdxConstant(0, DL));
} else {
BroadcastOp = Broadcast.getOperand(0);
if (BroadcastOp.getValueType().isVector())
@@ -55762,6 +55759,8 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
// not fit in an i16 and a vXi32 where X > 16 is more than 512 bits.
SDValue Trunc = DAG.getAnyExtOrTrunc(Masked, DL, MVT::i16);
SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, MVT::v16i1, Trunc);
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ const DataLayout &DataLayout = DAG.getDataLayout();
MVT PtrTy = TLI.getPointerTy(DataLayout);
if (CC == ISD::SETEQ)
@@ -55772,7 +55771,7 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
if (VT != MVT::v16i1)
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Bitcast,
- DAG.getConstant(0, DL, PtrTy));
+ DAG.getVectorIdxConstant(0, DL));
return Bitcast;
}
>From 60a5c7067d1e148635c056f7eb1c3754772a4b50 Mon Sep 17 00:00:00 2001
From: abhishek-kaushik22 <abhishek.kaushik at intel.com>
Date: Tue, 25 Feb 2025 15:21:22 +0530
Subject: [PATCH 17/17] Use DAG.getNOT
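`SelectionDAG::getNOT` expands to the same xor-with-all-ones pattern, so the hand-rolled splat of `getAllOnesConstant` — and the TLI/DataLayout lookups feeding it — are unnecessary. A minimal sketch, assuming SelectionDAG's public helpers:
```
#include "llvm/CodeGen/SelectionDAG.h"

// getNOT materializes `xor Val, all-ones` for the given type in one call.
llvm::SDValue invertDemo(llvm::SelectionDAG &DAG, const llvm::SDLoc &DL,
                         llvm::SDValue Mask) {
  return DAG.getNOT(DL, Mask, llvm::MVT::v16i1);
}
```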
---
llvm/lib/Target/X86/X86ISelLowering.cpp | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8be0b99bd8787..31e22d82886be 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55760,15 +55760,9 @@ static SDValue combineAVX512SetCCToKMOV(EVT VT, SDValue Op0, ISD::CondCode CC,
// not fit in an i16 and a vXi32 where X > 16 is more than 512 bits.
SDValue Trunc = DAG.getAnyExtOrTrunc(Masked, DL, MVT::i16);
SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, MVT::v16i1, Trunc);
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- const DataLayout &DataLayout = DAG.getDataLayout();
- MVT PtrTy = TLI.getPointerTy(DataLayout);
if (CC == ISD::SETEQ)
- Bitcast =
- DAG.getNode(ISD::XOR, DL, MVT::v16i1, Bitcast,
- DAG.getSplatBuildVector(MVT::v16i1, DL,
- DAG.getAllOnesConstant(DL, PtrTy)));
+ Bitcast = DAG.getNOT(DL, Bitcast, MVT::v16i1);
if (VT != MVT::v16i1)
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Bitcast,