[llvm] 122874c - [X86] Fold scalar_to_vector(shift(x,imm)) -> vshift(scalar_to_vector(x),imm)

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Thu Sep 5 08:17:46 PDT 2024


Author: Simon Pilgrim
Date: 2024-09-05T16:17:26+01:00
New Revision: 122874c955e06defb619b1afd4e26db482dbbf19

URL: https://github.com/llvm/llvm-project/commit/122874c955e06defb619b1afd4e26db482dbbf19
DIFF: https://github.com/llvm/llvm-project/commit/122874c955e06defb619b1afd4e26db482dbbf19.diff

LOG: [X86] Fold scalar_to_vector(shift(x,imm)) -> vshift(scalar_to_vector(x),imm)

Noticed while working on #107289
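
For illustration, with this fold the lshr_op1_constant SSE output in load-scalar-as-vector.ll (see the test diff below) no longer performs the shift in a GPR before inserting the result; the memory-operand form here is reconstructed from the FileCheck pattern:

  Before:
    movl (%rdi), %eax          # load the scalar
    shrl $17, %eax             # shift in a general-purpose register
    movd %eax, %xmm0           # insert into the vector register

  After:
    movd (%rdi), %xmm0         # load the scalar straight into the vector register
    psrld $17, %xmm0           # vector logical right shift by immediate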

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/buildvec-insertvec.ll
    llvm/test/CodeGen/X86/known-signbits-vector.ll
    llvm/test/CodeGen/X86/load-scalar-as-vector.ll
    llvm/test/CodeGen/X86/pr44915.ll
    llvm/test/CodeGen/X86/vec_insert-5.ll
    llvm/test/CodeGen/X86/vec_shift5.ll
    llvm/test/CodeGen/X86/vector-sext.ll
    llvm/test/CodeGen/X86/vector-shuffle-combining.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 5f87ffd2f1eabf..a4ad4a1bb12013 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -57403,7 +57403,8 @@ static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
+static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG,
+                                     const X86Subtarget &Subtarget) {
   EVT VT = N->getValueType(0);
   SDValue Src = N->getOperand(0);
   SDLoc DL(N);
@@ -57482,6 +57483,25 @@ static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
         // coverage.
       }
 
+  // Check for cases where we've ended up with a scalarized shift, typically
+  // during type legalization.
+  switch (Src.getOpcode()) {
+  case ISD::SHL:
+  case ISD::SRL:
+  case ISD::SRA:
+    if (auto *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1))) {
+      if (supportedVectorShiftWithImm(VT, Subtarget, Src.getOpcode()) &&
+          Src.hasOneUse()) {
+        SDValue SrcVec =
+            DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Src.getOperand(0));
+        unsigned Opc = getTargetVShiftUniformOpcode(Src.getOpcode(), false);
+        return getTargetVShiftByConstNode(Opc, DL, VT.getSimpleVT(), SrcVec,
+                                          Amt->getZExtValue(), DAG);
+      }
+    }
+    break;
+  }
+
   return SDValue();
 }
 
@@ -58034,7 +58054,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   // clang-format off
   default: break;
   case ISD::SCALAR_TO_VECTOR:
-    return combineScalarToVector(N, DAG);
+    return combineScalarToVector(N, DAG, Subtarget);
   case ISD::EXTRACT_VECTOR_ELT:
   case X86ISD::PEXTRW:
   case X86ISD::PEXTRB:

diff --git a/llvm/test/CodeGen/X86/buildvec-insertvec.ll b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
index ae70b6a5a46656..4b0e5441b4abf1 100644
--- a/llvm/test/CodeGen/X86/buildvec-insertvec.ll
+++ b/llvm/test/CodeGen/X86/buildvec-insertvec.ll
@@ -726,9 +726,9 @@ define void @PR46461(i16 %x, ptr %y) {
 ; SSE-LABEL: PR46461:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movzwl %di, %eax
-; SSE-NEXT:    shrl %eax
 ; SSE-NEXT:    movd %eax, %xmm0
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT:    psrld $1, %xmm0
 ; SSE-NEXT:    movdqa %xmm0, 48(%rsi)
 ; SSE-NEXT:    movdqa %xmm0, 32(%rsi)
 ; SSE-NEXT:    movdqa %xmm0, 16(%rsi)
@@ -738,9 +738,9 @@ define void @PR46461(i16 %x, ptr %y) {
 ; AVX1-LABEL: PR46461:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    movzwl %di, %eax
-; AVX1-NEXT:    shrl %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT:    vmovaps %ymm0, 32(%rsi)
 ; AVX1-NEXT:    vmovaps %ymm0, (%rsi)

diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index 9648daf7427b10..45b61155fe6260 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -220,11 +220,10 @@ define float @signbits_ashr_insert_ashr_extract_sitofp(i64 %a0, i64 %a1) nounwin
 ; X86:       # %bb.0:
 ; X86-NEXT:    pushl %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    sarl $30, %ecx
-; X86-NEXT:    shll $2, %eax
 ; X86-NEXT:    vmovd %eax, %xmm0
-; X86-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
+; X86-NEXT:    sarl $30, %eax
+; X86-NEXT:    vpslld $2, %xmm0, %xmm0
+; X86-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm0
 ; X86-NEXT:    vpsrlq $3, %xmm0, %xmm0
 ; X86-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X86-NEXT:    vmovss %xmm0, (%esp)

diff --git a/llvm/test/CodeGen/X86/load-scalar-as-vector.ll b/llvm/test/CodeGen/X86/load-scalar-as-vector.ll
index 3edbcd1fe18ebd..d2359ced3e19dd 100644
--- a/llvm/test/CodeGen/X86/load-scalar-as-vector.ll
+++ b/llvm/test/CodeGen/X86/load-scalar-as-vector.ll
@@ -274,16 +274,14 @@ define <2 x i64> @lshr_op0_constant(ptr %p) nounwind {
 define <4 x i32> @lshr_op1_constant(ptr %p) nounwind {
 ; SSE-LABEL: lshr_op1_constant:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movl (%rdi), %eax
-; SSE-NEXT:    shrl $17, %eax
-; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    psrld $17, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: lshr_op1_constant:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    movl (%rdi), %eax
-; AVX-NEXT:    shrl $17, %eax
-; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vpsrld $17, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x = load i32, ptr %p
   %b = lshr i32 %x, 17
@@ -317,15 +315,15 @@ define <8 x i16> @ashr_op1_constant(ptr %p) nounwind {
 ; SSE-LABEL: ashr_op1_constant:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movswl (%rdi), %eax
-; SSE-NEXT:    sarl $7, %eax
 ; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    psrad $7, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: ashr_op1_constant:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    movswl (%rdi), %eax
-; AVX-NEXT:    sarl $7, %eax
 ; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpsrad $7, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x = load i16, ptr %p
   %b = ashr i16 %x, 7
@@ -474,8 +472,8 @@ define <2 x i64> @udiv_op1_constant(ptr %p) nounwind {
 ; SSE-NEXT:    shrq %rax
 ; SSE-NEXT:    movabsq $-4392081922311798003, %rcx # imm = 0xC30C30C30C30C30D
 ; SSE-NEXT:    mulq %rcx
-; SSE-NEXT:    shrq $4, %rdx
 ; SSE-NEXT:    movq %rdx, %xmm0
+; SSE-NEXT:    psrlq $4, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: udiv_op1_constant:
@@ -484,8 +482,8 @@ define <2 x i64> @udiv_op1_constant(ptr %p) nounwind {
 ; AVX-NEXT:    shrq %rax
 ; AVX-NEXT:    movabsq $-4392081922311798003, %rcx # imm = 0xC30C30C30C30C30D
 ; AVX-NEXT:    mulq %rcx
-; AVX-NEXT:    shrq $4, %rdx
 ; AVX-NEXT:    vmovq %rdx, %xmm0
+; AVX-NEXT:    vpsrlq $4, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x = load i64, ptr %p
   %b = udiv i64 %x, 42

diff --git a/llvm/test/CodeGen/X86/pr44915.ll b/llvm/test/CodeGen/X86/pr44915.ll
index 1ebdd9ccb3190f..99205ab60ae11f 100644
--- a/llvm/test/CodeGen/X86/pr44915.ll
+++ b/llvm/test/CodeGen/X86/pr44915.ll
@@ -52,15 +52,14 @@ define i32 @extract3(ptr, i32) nounwind {
 ; X64-NEXT:    movl %eax, %ecx
 ; X64-NEXT:    andl $7, %ecx
 ; X64-NEXT:    movd %ecx, %xmm0
-; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    shrl $3, %ecx
-; X64-NEXT:    andl $7, %ecx
-; X64-NEXT:    movd %ecx, %xmm2
-; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; X64-NEXT:    movd %eax, %xmm2
+; X64-NEXT:    shrl $3, %eax
+; X64-NEXT:    andl $7, %eax
+; X64-NEXT:    movd %eax, %xmm3
+; X64-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
 ; X64-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X64-NEXT:    shrl $12, %eax
-; X64-NEXT:    movd %eax, %xmm1
-; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT:    psrld $12, %xmm2
+; X64-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; X64-NEXT:    movdqa %xmm0, -24(%rsp)
 ; X64-NEXT:    andl $7, %esi
 ; X64-NEXT:    movzwl -24(%rsp,%rsi,2), %eax

diff --git a/llvm/test/CodeGen/X86/vec_insert-5.ll b/llvm/test/CodeGen/X86/vec_insert-5.ll
index 176ae81e08a764..91743898545ee1 100644
--- a/llvm/test/CodeGen/X86/vec_insert-5.ll
+++ b/llvm/test/CodeGen/X86/vec_insert-5.ll
@@ -9,17 +9,16 @@ define void  @t1(i32 %a, ptr %P) nounwind {
 ; X86-LABEL: t1:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    shll $12, %ecx
-; X86-NEXT:    movd %ecx, %xmm0
+; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    pslld $12, %xmm0
 ; X86-NEXT:    psllq $32, %xmm0
 ; X86-NEXT:    movq %xmm0, (%eax)
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: t1:
 ; X64:       # %bb.0:
-; X64-NEXT:    shll $12, %edi
 ; X64-NEXT:    movd %edi, %xmm0
+; X64-NEXT:    pslld $12, %xmm0
 ; X64-NEXT:    psllq $32, %xmm0
 ; X64-NEXT:    movq %xmm0, (%rsi)
 ; X64-NEXT:    retq

diff --git a/llvm/test/CodeGen/X86/vec_shift5.ll b/llvm/test/CodeGen/X86/vec_shift5.ll
index f8bc6b01c70a84..2ab00ea96ada1f 100644
--- a/llvm/test/CodeGen/X86/vec_shift5.ll
+++ b/llvm/test/CodeGen/X86/vec_shift5.ll
@@ -290,13 +290,15 @@ define <4 x i32> @extelt0_twice_sub_pslli_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x
 ; This would crash because the scalar shift amount has a different type than the shift result.
 
 define <2 x i8> @PR58661(<2 x i8> %a0) {
-; CHECK-LABEL: PR58661:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    psrlw $8, %xmm0
-; CHECK-NEXT:    movd %xmm0, %eax
-; CHECK-NEXT:    shll $8, %eax
-; CHECK-NEXT:    movd %eax, %xmm0
-; CHECK-NEXT:    ret{{[l|q]}}
+; X86-LABEL: PR58661:
+; X86:       # %bb.0:
+; X86-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: PR58661:
+; X64:       # %bb.0:
+; X64-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-NEXT:    retq
   %shuffle = shufflevector <2 x i8> %a0, <2 x i8> <i8 poison, i8 0>, <2 x i32> <i32 1, i32 3>
   %x = bitcast <2 x i8> %shuffle to i16
   %shl = shl nuw i16 %x, 8

diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index dc9e69137a8a7e..d44b11f4ca1da0 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -3601,15 +3601,14 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; SSE2-LABEL: sext_4i17_to_4i32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq (%rdi), %rax
-; SSE2-NEXT:    movl %eax, %ecx
-; SSE2-NEXT:    shll $15, %ecx
-; SSE2-NEXT:    sarl $15, %ecx
-; SSE2-NEXT:    movd %ecx, %xmm0
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    pslld $15, %xmm0
+; SSE2-NEXT:    psrad $15, %xmm0
 ; SSE2-NEXT:    movq %rax, %rcx
 ; SSE2-NEXT:    shrq $17, %rcx
-; SSE2-NEXT:    shll $15, %ecx
-; SSE2-NEXT:    sarl $15, %ecx
 ; SSE2-NEXT:    movd %ecx, %xmm1
+; SSE2-NEXT:    pslld $15, %xmm1
+; SSE2-NEXT:    psrad $15, %xmm1
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    movl 8(%rdi), %ecx
 ; SSE2-NEXT:    shll $28, %ecx
@@ -3617,12 +3616,12 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; SSE2-NEXT:    shrq $51, %rdx
 ; SSE2-NEXT:    shll $15, %edx
 ; SSE2-NEXT:    orl %ecx, %edx
-; SSE2-NEXT:    sarl $15, %edx
 ; SSE2-NEXT:    movd %edx, %xmm1
+; SSE2-NEXT:    psrad $15, %xmm1
 ; SSE2-NEXT:    shrq $34, %rax
-; SSE2-NEXT:    shll $15, %eax
-; SSE2-NEXT:    sarl $15, %eax
 ; SSE2-NEXT:    movd %eax, %xmm2
+; SSE2-NEXT:    pslld $15, %xmm2
+; SSE2-NEXT:    psrad $15, %xmm2
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSE2-NEXT:    retq
@@ -3630,15 +3629,14 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; SSSE3-LABEL: sext_4i17_to_4i32:
 ; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movq (%rdi), %rax
-; SSSE3-NEXT:    movl %eax, %ecx
-; SSSE3-NEXT:    shll $15, %ecx
-; SSSE3-NEXT:    sarl $15, %ecx
-; SSSE3-NEXT:    movd %ecx, %xmm0
+; SSSE3-NEXT:    movd %eax, %xmm0
+; SSSE3-NEXT:    pslld $15, %xmm0
+; SSSE3-NEXT:    psrad $15, %xmm0
 ; SSSE3-NEXT:    movq %rax, %rcx
 ; SSSE3-NEXT:    shrq $17, %rcx
-; SSSE3-NEXT:    shll $15, %ecx
-; SSSE3-NEXT:    sarl $15, %ecx
 ; SSSE3-NEXT:    movd %ecx, %xmm1
+; SSSE3-NEXT:    pslld $15, %xmm1
+; SSSE3-NEXT:    psrad $15, %xmm1
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSSE3-NEXT:    movl 8(%rdi), %ecx
 ; SSSE3-NEXT:    shll $28, %ecx
@@ -3646,12 +3644,12 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; SSSE3-NEXT:    shrq $51, %rdx
 ; SSSE3-NEXT:    shll $15, %edx
 ; SSSE3-NEXT:    orl %ecx, %edx
-; SSSE3-NEXT:    sarl $15, %edx
 ; SSSE3-NEXT:    movd %edx, %xmm1
+; SSSE3-NEXT:    psrad $15, %xmm1
 ; SSSE3-NEXT:    shrq $34, %rax
-; SSSE3-NEXT:    shll $15, %eax
-; SSSE3-NEXT:    sarl $15, %eax
 ; SSSE3-NEXT:    movd %eax, %xmm2
+; SSSE3-NEXT:    pslld $15, %xmm2
+; SSSE3-NEXT:    psrad $15, %xmm2
 ; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; SSSE3-NEXT:    retq
@@ -3663,10 +3661,9 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; SSE41-NEXT:    shrq $17, %rcx
 ; SSE41-NEXT:    shll $15, %ecx
 ; SSE41-NEXT:    sarl $15, %ecx
-; SSE41-NEXT:    movl %eax, %edx
-; SSE41-NEXT:    shll $15, %edx
-; SSE41-NEXT:    sarl $15, %edx
-; SSE41-NEXT:    movd %edx, %xmm0
+; SSE41-NEXT:    movd %eax, %xmm0
+; SSE41-NEXT:    pslld $15, %xmm0
+; SSE41-NEXT:    psrad $15, %xmm0
 ; SSE41-NEXT:    pinsrd $1, %ecx, %xmm0
 ; SSE41-NEXT:    movq %rax, %rcx
 ; SSE41-NEXT:    shrq $34, %rcx
@@ -3689,10 +3686,9 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; AVX-NEXT:    shrq $17, %rcx
 ; AVX-NEXT:    shll $15, %ecx
 ; AVX-NEXT:    sarl $15, %ecx
-; AVX-NEXT:    movl %eax, %edx
-; AVX-NEXT:    shll $15, %edx
-; AVX-NEXT:    sarl $15, %edx
-; AVX-NEXT:    vmovd %edx, %xmm0
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpslld $15, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $15, %xmm0, %xmm0
 ; AVX-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
 ; AVX-NEXT:    movq %rax, %rcx
 ; AVX-NEXT:    shrq $34, %rcx
@@ -3711,25 +3707,24 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; X86-SSE2-LABEL: sext_4i17_to_4i32:
 ; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-SSE2-NEXT:    movl (%edx), %ecx
-; X86-SSE2-NEXT:    movl 4(%edx), %eax
+; X86-SSE2-NEXT:    movl (%edx), %eax
+; X86-SSE2-NEXT:    movl 4(%edx), %ecx
 ; X86-SSE2-NEXT:    movl 8(%edx), %edx
-; X86-SSE2-NEXT:    shldl $13, %eax, %edx
-; X86-SSE2-NEXT:    shll $15, %edx
-; X86-SSE2-NEXT:    sarl $15, %edx
+; X86-SSE2-NEXT:    shldl $13, %ecx, %edx
 ; X86-SSE2-NEXT:    movd %edx, %xmm0
-; X86-SSE2-NEXT:    movl %eax, %edx
-; X86-SSE2-NEXT:    shll $13, %edx
-; X86-SSE2-NEXT:    sarl $15, %edx
-; X86-SSE2-NEXT:    movd %edx, %xmm1
+; X86-SSE2-NEXT:    pslld $15, %xmm0
+; X86-SSE2-NEXT:    psrad $15, %xmm0
+; X86-SSE2-NEXT:    movd %ecx, %xmm1
+; X86-SSE2-NEXT:    pslld $13, %xmm1
+; X86-SSE2-NEXT:    psrad $15, %xmm1
 ; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-SSE2-NEXT:    shldl $15, %ecx, %eax
-; X86-SSE2-NEXT:    shll $15, %ecx
-; X86-SSE2-NEXT:    sarl $15, %ecx
-; X86-SSE2-NEXT:    movd %ecx, %xmm0
-; X86-SSE2-NEXT:    shll $15, %eax
-; X86-SSE2-NEXT:    sarl $15, %eax
-; X86-SSE2-NEXT:    movd %eax, %xmm2
+; X86-SSE2-NEXT:    movd %eax, %xmm0
+; X86-SSE2-NEXT:    pslld $15, %xmm0
+; X86-SSE2-NEXT:    psrad $15, %xmm0
+; X86-SSE2-NEXT:    shldl $15, %eax, %ecx
+; X86-SSE2-NEXT:    movd %ecx, %xmm2
+; X86-SSE2-NEXT:    pslld $15, %xmm2
+; X86-SSE2-NEXT:    psrad $15, %xmm2
 ; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; X86-SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X86-SSE2-NEXT:    retl
@@ -3748,9 +3743,9 @@ define <4 x i32> @sext_4i17_to_4i32(ptr %ptr) {
 ; X86-SSE41-NEXT:    shldl $15, %eax, %ecx
 ; X86-SSE41-NEXT:    shll $15, %ecx
 ; X86-SSE41-NEXT:    sarl $15, %ecx
-; X86-SSE41-NEXT:    shll $15, %eax
-; X86-SSE41-NEXT:    sarl $15, %eax
 ; X86-SSE41-NEXT:    movd %eax, %xmm0
+; X86-SSE41-NEXT:    pslld $15, %xmm0
+; X86-SSE41-NEXT:    psrad $15, %xmm0
 ; X86-SSE41-NEXT:    pinsrd $1, %ecx, %xmm0
 ; X86-SSE41-NEXT:    shll $13, %esi
 ; X86-SSE41-NEXT:    sarl $15, %esi

diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index 923af983f1d47b..04262b42492560 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -3537,47 +3537,43 @@ define <16 x i8> @PR107289(<16 x i8> %0) {
 ; SSE2-LABEL: PR107289:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movq %xmm0, %rax
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT:    movq %xmm0, %rcx
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSE2-NEXT:    movq %xmm1, %rcx
 ; SSE2-NEXT:    shldq $8, %rax, %rcx
-; SSE2-NEXT:    shlq $8, %rax
 ; SSE2-NEXT:    movq %rcx, %xmm1
-; SSE2-NEXT:    movq %rax, %xmm0
+; SSE2-NEXT:    psllq $8, %xmm0
 ; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: PR107289:
 ; SSSE3:       # %bb.0:
 ; SSSE3-NEXT:    movq %xmm0, %rax
-; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSSE3-NEXT:    movq %xmm0, %rcx
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; SSSE3-NEXT:    movq %xmm1, %rcx
 ; SSSE3-NEXT:    shldq $8, %rax, %rcx
-; SSSE3-NEXT:    shlq $8, %rax
 ; SSSE3-NEXT:    movq %rcx, %xmm1
-; SSSE3-NEXT:    movq %rax, %xmm0
+; SSSE3-NEXT:    psllq $8, %xmm0
 ; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSSE3-NEXT:    retq
 ;
 ; SSE41-LABEL: PR107289:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    pextrq $1, %xmm0, %rax
-; SSE41-NEXT:    movq %xmm0, %rcx
-; SSE41-NEXT:    shldq $8, %rcx, %rax
-; SSE41-NEXT:    shlq $8, %rcx
-; SSE41-NEXT:    movq %rax, %xmm1
-; SSE41-NEXT:    movq %rcx, %xmm0
+; SSE41-NEXT:    movq %xmm0, %rax
+; SSE41-NEXT:    pextrq $1, %xmm0, %rcx
+; SSE41-NEXT:    shldq $8, %rax, %rcx
+; SSE41-NEXT:    movq %rcx, %xmm1
+; SSE41-NEXT:    psllq $8, %xmm0
 ; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: PR107289:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    vmovq %xmm0, %rcx
-; AVX-NEXT:    shldq $8, %rcx, %rax
-; AVX-NEXT:    shlq $8, %rcx
-; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vmovq %xmm0, %rax
+; AVX-NEXT:    vpextrq $1, %xmm0, %rcx
+; AVX-NEXT:    shldq $8, %rax, %rcx
 ; AVX-NEXT:    vmovq %rcx, %xmm1
-; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT:    vpsllq $8, %xmm0, %xmm0
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %src = bitcast <16 x i8> %0 to i128
   %shl = shl i128 %src, 8
