[llvm-branch-commits] [llvm] [SelectionDAG] Deal with POISON for INSERT_VECTOR_ELT/INSERT_SUBVECTOR (part 2) (PR #143103)
Björn Pettersson via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Jul 4 01:48:05 PDT 2025
https://github.com/bjope updated https://github.com/llvm/llvm-project/pull/143103
From fe73a97a1ef8c1c2df5999e0b6abecde0e89733b Mon Sep 17 00:00:00 2001
From: Bjorn Pettersson <bjorn.a.pettersson at ericsson.com>
Date: Tue, 3 Jun 2025 10:01:01 +0200
Subject: [PATCH] [SelectionDAG] Deal with POISON for
INSERT_VECTOR_ELT/INSERT_SUBVECTOR (part 2)
Add support for INSERT_SUBVECTOR, INSERT_VECTOR_ELT and SCALAR_TO_VECTOR in
isGuaranteedNotToBeUndefOrPoison to avoid regressions seen after a previous
commit fixing #141034.
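For reference, a minimal IR sketch of the kind of pattern this helps with,
mirroring the insertelement chains in the mmx-build-vector tests updated
below (this sketch is not part of the patch, and the value names are made
up). The last insertelement writes an undef value into lane 3:

  %v0 = insertelement <4 x i16> undef, i16 %a0, i32 0
  %v1 = insertelement <4 x i16> %v0, i16 %a1, i32 1
  %v2 = insertelement <4 x i16> %v1, i16 %a2, i32 2
  %v3 = insertelement <4 x i16> %v2, i16 undef, i32 3

If a user only demands lanes 0-2 of %v3, the new INSERT_VECTOR_ELT handling
(together with peekThroughInsertVectorElt) skips the insert into the
non-demanded lane 3, so the demanded lanes can still be reported as not
undef/poison.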
---
llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 6 +
.../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 71 ++++++
llvm/test/CodeGen/Thumb2/mve-vld3.ll | 4 +-
.../X86/merge-consecutive-loads-128.ll | 78 ++----
llvm/test/CodeGen/X86/mmx-build-vector.ll | 233 +++++-------------
llvm/test/CodeGen/X86/pr62286.ll | 14 +-
.../CodeGen/X86/vector-shuffle-combining.ll | 41 ++-
.../zero_extend_vector_inreg_of_broadcast.ll | 8 +-
8 files changed, 191 insertions(+), 264 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index a3675eecfea3f..08db31c63367d 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1889,6 +1889,12 @@ LLVM_ABI SDValue peekThroughExtractSubvectors(SDValue V);
/// If \p V is not a truncation, it is returned as-is.
LLVM_ABI SDValue peekThroughTruncates(SDValue V);
+/// Recursively peek through INSERT_VECTOR_ELT nodes, returning the source
+/// vector operand of \p V, as long as \p V is an INSERT_VECTOR_ELT operation
+/// that does not insert into any of the demanded vector elts.
+LLVM_ABI SDValue peekThroughInsertVectorElt(SDValue V,
+ const APInt &DemandedElts);
+
/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
/// constant is canonicalized to be operand 1.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 3b4802d4b47b1..17fe550d38c55 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5456,6 +5456,60 @@ bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
}
return true;
+ case ISD::INSERT_SUBVECTOR: {
+ if (Op.getValueType().isScalableVector())
+ break;
+ SDValue Src = Op.getOperand(0);
+ SDValue Sub = Op.getOperand(1);
+ uint64_t Idx = Op.getConstantOperandVal(2);
+ unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
+ APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
+ APInt DemandedSrcElts = DemandedElts;
+ DemandedSrcElts.clearBits(Idx, Idx + NumSubElts);
+
+ if (!!DemandedSubElts && !isGuaranteedNotToBeUndefOrPoison(
+ Sub, DemandedSubElts, PoisonOnly, Depth + 1))
+ return false;
+ if (!!DemandedSrcElts && !isGuaranteedNotToBeUndefOrPoison(
+ Src, DemandedSrcElts, PoisonOnly, Depth + 1))
+ return false;
+ return true;
+ }
+
+ case ISD::INSERT_VECTOR_ELT: {
+ SDValue InVec = Op.getOperand(0);
+ SDValue InVal = Op.getOperand(1);
+ SDValue EltNo = Op.getOperand(2);
+ EVT VT = InVec.getValueType();
+ auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
+ if (IndexC && VT.isFixedLengthVector() &&
+ IndexC->getZExtValue() < VT.getVectorNumElements()) {
+ if (DemandedElts[IndexC->getZExtValue()] &&
+ !isGuaranteedNotToBeUndefOrPoison(InVal, PoisonOnly, Depth + 1))
+ return false;
+ APInt InVecDemandedElts = DemandedElts;
+ InVecDemandedElts.clearBit(IndexC->getZExtValue());
+ if (!!InVecDemandedElts &&
+ !isGuaranteedNotToBeUndefOrPoison(
+ peekThroughInsertVectorElt(InVec, InVecDemandedElts),
+ InVecDemandedElts, PoisonOnly, Depth + 1))
+ return false;
+ return true;
+ }
+ break;
+ }
+
+ case ISD::SCALAR_TO_VECTOR:
+ // Check upper (known undef) elements.
+ if (DemandedElts.ugt(1) && !PoisonOnly)
+ return false;
+ // Check element zero.
+ if (DemandedElts[0] && !isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0),
+ PoisonOnly,
+ Depth + 1))
+ return false;
+ return true;
+
case ISD::SPLAT_VECTOR:
return isGuaranteedNotToBeUndefOrPoison(Op.getOperand(0), PoisonOnly,
Depth + 1);
@@ -12508,6 +12562,23 @@ SDValue llvm::peekThroughTruncates(SDValue V) {
return V;
}
+SDValue llvm::peekThroughInsertVectorElt(SDValue V, const APInt &DemandedElts) {
+ while (V.getOpcode() == ISD::INSERT_VECTOR_ELT) {
+ SDValue InVec = V.getOperand(0);
+ SDValue EltNo = V.getOperand(2);
+ EVT VT = InVec.getValueType();
+ auto *IndexC = dyn_cast<ConstantSDNode>(EltNo);
+ if (IndexC && VT.isFixedLengthVector() &&
+ IndexC->getZExtValue() < VT.getVectorNumElements() &&
+ !DemandedElts[IndexC->getZExtValue()]) {
+ V = InVec;
+ continue;
+ }
+ break;
+ }
+ return V;
+}
+
bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
if (V.getOpcode() != ISD::XOR)
return false;
diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
index 38e42c137e3a9..4dd9173e2d418 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll
@@ -663,8 +663,8 @@ define void @vld3_v2i8(ptr %src, ptr %dst) {
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .pad #8
; CHECK-NEXT: sub sp, #8
-; CHECK-NEXT: ldrd r2, r0, [r0]
-; CHECK-NEXT: strd r2, r0, [sp]
+; CHECK-NEXT: ldrd r0, r2, [r0]
+; CHECK-NEXT: strd r0, r2, [sp]
; CHECK-NEXT: mov r0, sp
; CHECK-NEXT: vldrb.u16 q0, [r0]
; CHECK-NEXT: vmov.u16 r0, q0[4]
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
index 1df4e9f47f21b..595f8491b405c 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -262,54 +262,37 @@ define <4 x float> @merge_4f32_f32_45zz(ptr %ptr) nounwind uwtable noinline ssp
define <4 x float> @merge_4f32_f32_012u(ptr %ptr) nounwind uwtable noinline ssp {
; SSE2-LABEL: merge_4f32_f32_012u:
; SSE2: # %bb.0:
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_012u:
; SSE41: # %bb.0:
-; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_012u:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: retq
;
; X86-SSE1-LABEL: merge_4f32_f32_012u:
; X86-SSE1: # %bb.0:
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; X86-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; X86-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X86-SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X86-SSE1-NEXT: retl
;
; X86-SSE41-LABEL: merge_4f32_f32_012u:
; X86-SSE41: # %bb.0:
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; X86-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X86-SSE41-NEXT: retl
%ptr1 = getelementptr inbounds float, ptr %ptr, i64 1
%ptr2 = getelementptr inbounds float, ptr %ptr, i64 2
@@ -326,54 +309,37 @@ define <4 x float> @merge_4f32_f32_012u(ptr %ptr) nounwind uwtable noinline ssp
define <4 x float> @merge_4f32_f32_019u(ptr %ptr) nounwind uwtable noinline ssp {
; SSE2-LABEL: merge_4f32_f32_019u:
; SSE2: # %bb.0:
-; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE2-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_019u:
; SSE41: # %bb.0:
-; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT: retq
;
; AVX-LABEL: merge_4f32_f32_019u:
; AVX: # %bb.0:
-; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: retq
;
; X86-SSE1-LABEL: merge_4f32_f32_019u:
; X86-SSE1: # %bb.0:
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; X86-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; X86-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; X86-SSE1-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X86-SSE1-NEXT: retl
;
; X86-SSE41-LABEL: merge_4f32_f32_019u:
; X86-SSE41: # %bb.0:
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
-; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; X86-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; X86-SSE41-NEXT: retl
%ptr1 = getelementptr inbounds float, ptr %ptr, i64 1
%ptr2 = getelementptr inbounds float, ptr %ptr, i64 9
diff --git a/llvm/test/CodeGen/X86/mmx-build-vector.ll b/llvm/test/CodeGen/X86/mmx-build-vector.ll
index 10b7ad285fa7b..d8a010bacc683 100644
--- a/llvm/test/CodeGen/X86/mmx-build-vector.ll
+++ b/llvm/test/CodeGen/X86/mmx-build-vector.ll
@@ -2,11 +2,11 @@
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx | FileCheck %s --check-prefixes=X86,X86-MMX
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefixes=X86,X86-SSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+mmx,+ssse3 | FileCheck %s --check-prefixes=X86,X86-SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefixes=X64,X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+ssse3 | FileCheck %s --check-prefixes=X64,X64-SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx | FileCheck %s --check-prefixes=X64,X64-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx512f | FileCheck %s --check-prefixes=X64,X64-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+ssse3 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+mmx,+avx512f | FileCheck %s --check-prefix=X64
declare <1 x i64> @llvm.x86.mmx.padd.d(<1 x i64>, <1 x i64>)
@@ -290,21 +290,15 @@ define void @build_v4i16_0zuz(ptr%p0, i16 %a0, i16 %a1, i16 %a2, i16 %a3) nounwi
define void @build_v4i16_012u(ptr%p0, i16 %a0, i16 %a1, i16 %a2, i16 %a3) nounwind {
; X86-LABEL: build_v4i16_012u:
; X86: # %bb.0:
-; X86-NEXT: pushl %esi
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movd %eax, %mm0
-; X86-NEXT: movd %esi, %mm1
-; X86-NEXT: punpcklwd %mm0, %mm1 # mm1 = mm1[0],mm0[0],mm1[1],mm0[1]
-; X86-NEXT: movd %edx, %mm0
-; X86-NEXT: movd %ecx, %mm2
-; X86-NEXT: punpcklwd %mm0, %mm2 # mm2 = mm2[0],mm0[0],mm2[1],mm0[1]
-; X86-NEXT: punpckldq %mm1, %mm2 # mm2 = mm2[0],mm1[0]
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
+; X86-NEXT: punpcklwd %mm0, %mm0 # mm0 = mm0[0,0,1,1]
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm1
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm2
+; X86-NEXT: punpcklwd %mm1, %mm2 # mm2 = mm2[0],mm1[0],mm2[1],mm1[1]
+; X86-NEXT: punpckldq %mm0, %mm2 # mm2 = mm2[0],mm0[0]
; X86-NEXT: paddd %mm2, %mm2
; X86-NEXT: movq %mm2, (%eax)
-; X86-NEXT: popl %esi
; X86-NEXT: retl
;
; X64-LABEL: build_v4i16_012u:
@@ -481,107 +475,45 @@ define void @build_v8i8_0u2345z7(ptr%p0, i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
ret void
}
-
-; Recursion depth limit in isGuaranteedNotToBeUndefOrPoison prevents llc from
-; detecting that we insert an "undef" element in a position that already is
-; undef. OTOH, opt would optimize away that insertelement operation from the
-; IR, so maybe that isn't a problem in reality.
define void @build_v8i8_0123zzzu(ptr%p0, i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7) nounwind {
-; X86-MMX-LABEL: build_v8i8_0123zzzu:
-; X86-MMX: # %bb.0:
-; X86-MMX-NEXT: pushl %ebp
-; X86-MMX-NEXT: movl %esp, %ebp
-; X86-MMX-NEXT: pushl %esi
-; X86-MMX-NEXT: andl $-8, %esp
-; X86-MMX-NEXT: subl $16, %esp
-; X86-MMX-NEXT: movl 8(%ebp), %eax
-; X86-MMX-NEXT: movzbl 20(%ebp), %edx
-; X86-MMX-NEXT: movzbl 24(%ebp), %ecx
-; X86-MMX-NEXT: shll $8, %ecx
-; X86-MMX-NEXT: orl %edx, %ecx
-; X86-MMX-NEXT: shll $16, %ecx
-; X86-MMX-NEXT: movzbl 12(%ebp), %edx
-; X86-MMX-NEXT: movzbl 16(%ebp), %esi
-; X86-MMX-NEXT: shll $8, %esi
-; X86-MMX-NEXT: orl %edx, %esi
-; X86-MMX-NEXT: movzwl %si, %edx
-; X86-MMX-NEXT: orl %ecx, %edx
-; X86-MMX-NEXT: movzbl %al, %ecx
-; X86-MMX-NEXT: shll $24, %ecx
-; X86-MMX-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-MMX-NEXT: movl %edx, (%esp)
-; X86-MMX-NEXT: movq (%esp), %mm0
-; X86-MMX-NEXT: paddd %mm0, %mm0
-; X86-MMX-NEXT: movq %mm0, (%eax)
-; X86-MMX-NEXT: leal -4(%ebp), %esp
-; X86-MMX-NEXT: popl %esi
-; X86-MMX-NEXT: popl %ebp
-; X86-MMX-NEXT: retl
-;
-; X86-SSE-LABEL: build_v8i8_0123zzzu:
-; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %edx
-; X86-SSE-NEXT: shll $8, %edx
-; X86-SSE-NEXT: orl %ecx, %edx
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-SSE-NEXT: shll $16, %ecx
-; X86-SSE-NEXT: orl %edx, %ecx
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %edx
-; X86-SSE-NEXT: shll $24, %edx
-; X86-SSE-NEXT: orl %ecx, %edx
-; X86-SSE-NEXT: movd %edx, %xmm0
-; X86-SSE-NEXT: movdq2q %xmm0, %mm0
-; X86-SSE-NEXT: paddd %mm0, %mm0
-; X86-SSE-NEXT: movq %mm0, (%eax)
-; X86-SSE-NEXT: retl
+; X86-LABEL: build_v8i8_0123zzzu:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm1
+; X86-NEXT: punpcklbw %mm0, %mm1 # mm1 = mm1[0],mm0[0],mm1[1],mm0[1],mm1[2],mm0[2],mm1[3],mm0[3]
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm0
+; X86-NEXT: movd {{[0-9]+}}(%esp), %mm2
+; X86-NEXT: punpcklbw %mm0, %mm2 # mm2 = mm2[0],mm0[0],mm2[1],mm0[1],mm2[2],mm0[2],mm2[3],mm0[3]
+; X86-NEXT: punpcklwd %mm1, %mm2 # mm2 = mm2[0],mm1[0],mm2[1],mm1[1]
+; X86-NEXT: pxor %mm0, %mm0
+; X86-NEXT: pxor %mm1, %mm1
+; X86-NEXT: punpcklbw %mm1, %mm1 # mm1 = mm1[0,0,1,1,2,2,3,3]
+; X86-NEXT: punpcklbw %mm0, %mm0 # mm0 = mm0[0,0,1,1,2,2,3,3]
+; X86-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
+; X86-NEXT: punpckldq %mm0, %mm2 # mm2 = mm2[0],mm0[0]
+; X86-NEXT: paddd %mm2, %mm2
+; X86-NEXT: movq %mm2, (%eax)
+; X86-NEXT: retl
;
-; X64-SSE2-LABEL: build_v8i8_0123zzzu:
-; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: movzbl %sil, %eax
-; X64-SSE2-NEXT: movzbl %dl, %edx
-; X64-SSE2-NEXT: shll $8, %edx
-; X64-SSE2-NEXT: orl %eax, %edx
-; X64-SSE2-NEXT: movzbl %cl, %eax
-; X64-SSE2-NEXT: shll $16, %eax
-; X64-SSE2-NEXT: orl %edx, %eax
-; X64-SSE2-NEXT: shll $24, %r8d
-; X64-SSE2-NEXT: orl %eax, %r8d
-; X64-SSE2-NEXT: movd %r8d, %xmm0
-; X64-SSE2-NEXT: movdq2q %xmm0, %mm0
-; X64-SSE2-NEXT: paddd %mm0, %mm0
-; X64-SSE2-NEXT: movq %mm0, (%rdi)
-; X64-SSE2-NEXT: retq
-;
-; X64-SSSE3-LABEL: build_v8i8_0123zzzu:
-; X64-SSSE3: # %bb.0:
-; X64-SSSE3-NEXT: movzbl %sil, %eax
-; X64-SSSE3-NEXT: movzbl %dl, %edx
-; X64-SSSE3-NEXT: shll $8, %edx
-; X64-SSSE3-NEXT: orl %eax, %edx
-; X64-SSSE3-NEXT: movzbl %cl, %eax
-; X64-SSSE3-NEXT: shll $16, %eax
-; X64-SSSE3-NEXT: orl %edx, %eax
-; X64-SSSE3-NEXT: shll $24, %r8d
-; X64-SSSE3-NEXT: orl %eax, %r8d
-; X64-SSSE3-NEXT: movd %r8d, %xmm0
-; X64-SSSE3-NEXT: movdq2q %xmm0, %mm0
-; X64-SSSE3-NEXT: paddd %mm0, %mm0
-; X64-SSSE3-NEXT: movq %mm0, (%rdi)
-; X64-SSSE3-NEXT: retq
-;
-; X64-AVX-LABEL: build_v8i8_0123zzzu:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: movzbl %sil, %eax
-; X64-AVX-NEXT: vmovd %eax, %xmm0
-; X64-AVX-NEXT: vpinsrb $1, %edx, %xmm0, %xmm0
-; X64-AVX-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; X64-AVX-NEXT: vpinsrb $3, %r8d, %xmm0, %xmm0
-; X64-AVX-NEXT: movdq2q %xmm0, %mm0
-; X64-AVX-NEXT: paddd %mm0, %mm0
-; X64-AVX-NEXT: movq %mm0, (%rdi)
-; X64-AVX-NEXT: retq
+; X64-LABEL: build_v8i8_0123zzzu:
+; X64: # %bb.0:
+; X64-NEXT: movd %r8d, %mm0
+; X64-NEXT: movd %ecx, %mm1
+; X64-NEXT: punpcklbw %mm0, %mm1 # mm1 = mm1[0],mm0[0],mm1[1],mm0[1],mm1[2],mm0[2],mm1[3],mm0[3]
+; X64-NEXT: movd %edx, %mm0
+; X64-NEXT: movd %esi, %mm2
+; X64-NEXT: punpcklbw %mm0, %mm2 # mm2 = mm2[0],mm0[0],mm2[1],mm0[1],mm2[2],mm0[2],mm2[3],mm0[3]
+; X64-NEXT: punpcklwd %mm1, %mm2 # mm2 = mm2[0],mm1[0],mm2[1],mm1[1]
+; X64-NEXT: pxor %mm0, %mm0
+; X64-NEXT: pxor %mm1, %mm1
+; X64-NEXT: punpcklbw %mm1, %mm1 # mm1 = mm1[0,0,1,1,2,2,3,3]
+; X64-NEXT: punpcklbw %mm0, %mm0 # mm0 = mm0[0,0,1,1,2,2,3,3]
+; X64-NEXT: punpcklwd %mm1, %mm0 # mm0 = mm0[0],mm1[0],mm0[1],mm1[1]
+; X64-NEXT: punpckldq %mm0, %mm2 # mm2 = mm2[0],mm0[0]
+; X64-NEXT: paddd %mm2, %mm2
+; X64-NEXT: movq %mm2, (%rdi)
+; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 %a0, i32 0
%2 = insertelement <8 x i8> %1, i8 %a1, i32 1
%3 = insertelement <8 x i8> %2, i8 %a2, i32 2
@@ -626,61 +558,22 @@ define void @build_v8i8_0uuuuzzz(ptr%p0, i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4,
}
define void @build_v8i8_0zzzzzzu(ptr%p0, i8 %a0, i8 %a1, i8 %a2, i8 %a3, i8 %a4, i8 %a5, i8 %a6, i8 %a7) nounwind {
-; X86-MMX-LABEL: build_v8i8_0zzzzzzu:
-; X86-MMX: # %bb.0:
-; X86-MMX-NEXT: pushl %ebp
-; X86-MMX-NEXT: movl %esp, %ebp
-; X86-MMX-NEXT: andl $-8, %esp
-; X86-MMX-NEXT: subl $8, %esp
-; X86-MMX-NEXT: movl 8(%ebp), %eax
-; X86-MMX-NEXT: movzbl 12(%ebp), %ecx
-; X86-MMX-NEXT: movl %ecx, (%esp)
-; X86-MMX-NEXT: movzbl %al, %ecx
-; X86-MMX-NEXT: shll $24, %ecx
-; X86-MMX-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-MMX-NEXT: movq (%esp), %mm0
-; X86-MMX-NEXT: paddd %mm0, %mm0
-; X86-MMX-NEXT: movq %mm0, (%eax)
-; X86-MMX-NEXT: movl %ebp, %esp
-; X86-MMX-NEXT: popl %ebp
-; X86-MMX-NEXT: retl
-;
-; X86-SSE-LABEL: build_v8i8_0zzzzzzu:
-; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-SSE-NEXT: movd %ecx, %xmm0
-; X86-SSE-NEXT: movdq2q %xmm0, %mm0
-; X86-SSE-NEXT: paddd %mm0, %mm0
-; X86-SSE-NEXT: movq %mm0, (%eax)
-; X86-SSE-NEXT: retl
+; X86-LABEL: build_v8i8_0zzzzzzu:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movd %eax, %mm0
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: paddd %mm0, %mm0
+; X86-NEXT: movq %mm0, (%eax)
+; X86-NEXT: retl
;
-; X64-SSE2-LABEL: build_v8i8_0zzzzzzu:
-; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: movzbl %sil, %eax
-; X64-SSE2-NEXT: movd %eax, %xmm0
-; X64-SSE2-NEXT: movdq2q %xmm0, %mm0
-; X64-SSE2-NEXT: paddd %mm0, %mm0
-; X64-SSE2-NEXT: movq %mm0, (%rdi)
-; X64-SSE2-NEXT: retq
-;
-; X64-SSSE3-LABEL: build_v8i8_0zzzzzzu:
-; X64-SSSE3: # %bb.0:
-; X64-SSSE3-NEXT: movzbl %sil, %eax
-; X64-SSSE3-NEXT: movd %eax, %xmm0
-; X64-SSSE3-NEXT: movdq2q %xmm0, %mm0
-; X64-SSSE3-NEXT: paddd %mm0, %mm0
-; X64-SSSE3-NEXT: movq %mm0, (%rdi)
-; X64-SSSE3-NEXT: retq
-;
-; X64-AVX-LABEL: build_v8i8_0zzzzzzu:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: movzbl %sil, %eax
-; X64-AVX-NEXT: vmovd %eax, %xmm0
-; X64-AVX-NEXT: movdq2q %xmm0, %mm0
-; X64-AVX-NEXT: paddd %mm0, %mm0
-; X64-AVX-NEXT: movq %mm0, (%rdi)
-; X64-AVX-NEXT: retq
+; X64-LABEL: build_v8i8_0zzzzzzu:
+; X64: # %bb.0:
+; X64-NEXT: movzbl %sil, %eax
+; X64-NEXT: movd %eax, %mm0
+; X64-NEXT: paddd %mm0, %mm0
+; X64-NEXT: movq %mm0, (%rdi)
+; X64-NEXT: retq
%1 = insertelement <8 x i8> undef, i8 %a0, i32 0
%2 = insertelement <8 x i8> %1, i8 0, i32 1
%3 = insertelement <8 x i8> %2, i8 0, i32 2
diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll
index 2d1b7fcbf0239..ce03f8fad4a19 100644
--- a/llvm/test/CodeGen/X86/pr62286.ll
+++ b/llvm/test/CodeGen/X86/pr62286.ll
@@ -28,8 +28,9 @@ define i64 @PR62286(i32 %a) {
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
@@ -58,12 +59,13 @@ define i64 @PR62286(i32 %a) {
; AVX512-LABEL: PR62286:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovd %edi, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
-; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-NEXT: movw $4369, %ax # imm = 0x1111
+; AVX512-NEXT: movb $8, %al
; AVX512-NEXT: kmovd %eax, %k1
-; AVX512-NEXT: vpaddd %zmm0, %zmm0, %zmm1 {%k1}
-; AVX512-NEXT: vpmovsxdq %ymm1, %zmm0
+; AVX512-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index 9a1ce62a45834..91d3bdc68434a 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -3051,21 +3051,16 @@ define <8 x i16> @shuffle_scalar_to_vector_extract(ptr %p0, ptr %p1, ptr %p2) {
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: pextrw $7, %xmm1, %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movsbl (%rsi), %eax
; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65531,65531,65531,65531]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: movsbl (%rdx), %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT: movsbl (%rdx), %ecx
-; SSE2-NEXT: movd %ecx, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: movsbl (%rsi), %eax
-; SSE2-NEXT: movd %eax, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE2-NEXT: retq
;
@@ -3074,26 +3069,21 @@ define <8 x i16> @shuffle_scalar_to_vector_extract(ptr %p0, ptr %p1, ptr %p2) {
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT: psraw $8, %xmm1
-; SSSE3-NEXT: movsbl (%rdx), %eax
-; SSSE3-NEXT: movd %eax, %xmm2
-; SSSE3-NEXT: pxor %xmm0, %xmm0
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT: movsbl (%rsi), %eax
-; SSSE3-NEXT: movd %eax, %xmm3
-; SSSE3-NEXT: palignr {{.*#+}} xmm3 = xmm1[14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSSE3-NEXT: movd %eax, %xmm2
-; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [65531,65531,65531,65531]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; SSSE3-NEXT: palignr {{.*#+}} xmm2 = xmm1[14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10,11,12,13]
+; SSSE3-NEXT: movsbl (%rdx), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSSE3-NEXT: pxor %xmm0, %xmm0
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: shuffle_scalar_to_vector_extract:
; SSE41: # %bb.0:
-; SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
+; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
; SSE41-NEXT: pextrw $4, %xmm0, %eax
; SSE41-NEXT: pextrw $7, %xmm0, %ecx
; SSE41-NEXT: pxor %xmm0, %xmm0
@@ -3109,8 +3099,7 @@ define <8 x i16> @shuffle_scalar_to_vector_extract(ptr %p0, ptr %p1, ptr %p2) {
;
; AVX-LABEL: shuffle_scalar_to_vector_extract:
; AVX: # %bb.0:
-; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX-NEXT: vpmovsxbw %xmm0, %xmm0
+; AVX-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX-NEXT: vpextrw $4, %xmm0, %eax
; AVX-NEXT: vpextrw $7, %xmm0, %ecx
; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 6b713c7a43e51..572ed314ab31d 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -3273,10 +3273,10 @@ define void @vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3(ptr %i
;
; AVX-LABEL: vec384_i8_widen_to_i128_factor16_broadcast_to_v3i128_factor3:
; AVX: # %bb.0:
-; AVX-NEXT: vmovdqa (%rsi), %xmm0
-; AVX-NEXT: vmovdqa 48(%rsi), %xmm1
-; AVX-NEXT: vpaddb 48(%rdi), %xmm1, %xmm1
-; AVX-NEXT: vpaddb (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovdqa (%rdi), %xmm0
+; AVX-NEXT: vmovdqa 48(%rdi), %xmm1
+; AVX-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
+; AVX-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX-NEXT: vpmovsxwq {{.*#+}} xmm2 = [18446744073709551360,18446744073709551615]
; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0