[llvm] 26e742f - [x86][CGP] improve sinking of splatted vector shift amount operand

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Thu May 14 05:54:47 PDT 2020


Author: Sanjay Patel
Date: 2020-05-14T08:36:03-04:00
New Revision: 26e742fd848b27ff925eab23e4d2a5675b418721

URL: https://github.com/llvm/llvm-project/commit/26e742fd848b27ff925eab23e4d2a5675b418721
DIFF: https://github.com/llvm/llvm-project/commit/26e742fd848b27ff925eab23e4d2a5675b418721.diff

LOG: [x86][CGP] improve sinking of splatted vector shift amount operand

This expands on the enablement of the shouldSinkOperands() TLI hook in D79718.

The last codegen/IR test diff shows what I suspected could happen - we were
sinking any splatted operand of a shift into the loop. But that's not what we
want in general; we only want to sink the *shift amount* operand if it is a
splat.
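
For illustration, here is a minimal IR sketch (hypothetical; not one of the
test cases in this patch) of the pattern we do want to catch: the splat that
feeds the shift *amount* is defined outside the loop, and duplicating it next
to the shl lets SDAG select a shift-by-scalar (e.g. pslld) rather than a
generic variable vector shift:

define void @sink_splat_amount(<4 x i32>* %p, i32 %amt, i64 %n) {
entry:
  %ins = insertelement <4 x i32> undef, i32 %amt, i32 0
  %amt.splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep = getelementptr inbounds <4 x i32>, <4 x i32>* %p, i64 %iv
  %v = load <4 x i32>, <4 x i32>* %gep, align 16
  ; shouldSinkOperands() now reports operand 1 (the shift amount) of this shl
  ; as sinkable because it is a splat shuffle, so CGP duplicates the shuffle
  ; into this block; a splat feeding the shifted value %v would not be sunk.
  %sh = shl <4 x i32> %v, %amt.splat
  store <4 x i32> %sh, <4 x i32>* %gep, align 16
  %iv.next = add i64 %iv, 1
  %done = icmp eq i64 %iv.next, %n
  br i1 %done, label %exit, label %loop

exit:
  ret void
}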

Differential Revision: https://reviews.llvm.org/D79827

Added: 
    

Modified: 
    llvm/lib/CodeGen/CodeGenPrepare.cpp
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll
    llvm/test/Transforms/CodeGenPrepare/X86/vec-shift.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 623bf721b19e..8723e26d47f8 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -392,8 +392,6 @@ class TypePromotionTransaction;
     bool optimizeLoadExt(LoadInst *Load);
     bool optimizeShiftInst(BinaryOperator *BO);
     bool optimizeSelectInst(SelectInst *SI);
-    bool sinkShuffleVectorToShift(ShuffleVectorInst *SVI);
-    bool convertSplatType(ShuffleVectorInst *SVI);
     bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
     bool optimizeSwitchInst(SwitchInst *SI);
     bool optimizeExtractElementInst(Instruction *Inst);
@@ -6417,66 +6415,10 @@ bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
   return true;
 }
 
-/// Some targets have expensive vector shifts if the lanes aren't all the same
-/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
-/// it's often worth sinking a shufflevector splat down to its use so that
-/// codegen can spot all lanes are identical.
-bool CodeGenPrepare::sinkShuffleVectorToShift(ShuffleVectorInst *SVI) {
-  BasicBlock *DefBB = SVI->getParent();
-
-  // Only do this xform if variable vector shifts are particularly expensive.
-  if (!TLI->isVectorShiftByScalarCheap(SVI->getType()))
-    return false;
-
-  // We only expect better codegen by sinking a shuffle if we can recognise a
-  // constant splat.
-  if (getSplatIndex(SVI->getShuffleMask()) < 0)
-    return false;
-
-  // InsertedShuffles - Only insert a shuffle in each block once.
-  DenseMap<BasicBlock*, Instruction*> InsertedShuffles;
-
-  bool MadeChange = false;
-  for (User *U : SVI->users()) {
-    Instruction *UI = cast<Instruction>(U);
-
-    // Figure out which BB this ext is used in.
-    BasicBlock *UserBB = UI->getParent();
-    if (UserBB == DefBB) continue;
-
-    // For now only apply this when the splat is used by a shift instruction.
-    if (!UI->isShift()) continue;
-
-    // Everything checks out, sink the shuffle if the user's block doesn't
-    // already have a copy.
-    Instruction *&InsertedShuffle = InsertedShuffles[UserBB];
-
-    if (!InsertedShuffle) {
-      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
-      assert(InsertPt != UserBB->end());
-      InsertedShuffle =
-          new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
-                                SVI->getShuffleMask(), "", &*InsertPt);
-      InsertedShuffle->setDebugLoc(SVI->getDebugLoc());
-    }
-
-    UI->replaceUsesOfWith(SVI, InsertedShuffle);
-    MadeChange = true;
-  }
-
-  // If we removed all uses, nuke the shuffle.
-  if (SVI->use_empty()) {
-    SVI->eraseFromParent();
-    MadeChange = true;
-  }
-
-  return MadeChange;
-}
-
 /// Some targets only accept certain types for splat inputs. For example a VDUP
 /// in MVE takes a GPR (integer) register, and the instruction that incorporate
 /// a VDUP (such as a VADD qd, qm, rm) also require a gpr register.
-bool CodeGenPrepare::convertSplatType(ShuffleVectorInst *SVI) {
+bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
   if (!match(SVI,
              m_ShuffleVector(m_InsertElement(m_Undef(), m_Value(), m_ZeroInt()),
                              m_Undef(), m_ZeroMask())))
@@ -6516,14 +6458,6 @@ bool CodeGenPrepare::convertSplatType(ShuffleVectorInst *SVI) {
   return true;
 }
 
-bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
-  if (sinkShuffleVectorToShift(SVI))
-    return true;
-  if (convertSplatType(SVI))
-    return true;
-  return false;
-}
-
 bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
   // If the operands of I can be folded into a target instruction together with
   // I, duplicate and sink them.

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7cfdcf22ac81..0750b14a95b6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30680,18 +30680,23 @@ bool X86TargetLowering::shouldSinkOperands(Instruction *I,
   // A uniform shift amount in a vector shift or funnel shift may be much
   // cheaper than a generic variable vector shift, so make that pattern visible
   // to SDAG by sinking the shuffle instruction next to the shift.
-  // TODO: This should handle normal shift opcodes too.
-  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
-    Intrinsic::ID ID = II->getIntrinsicID();
-    if (ID == Intrinsic::fshl || ID == Intrinsic::fshr) {
-      // The shift amount operand for these intrinsics is operand 2.
-      auto *Shuf = dyn_cast<ShuffleVectorInst>(II->getOperand(2));
-      if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
-          isVectorShiftByScalarCheap(I->getType())) {
-        Ops.push_back(&I->getOperandUse(2));
-        return true;
-      }
-    }
+  int ShiftAmountOpNum = -1;
+  if (I->isShift())
+    ShiftAmountOpNum = 1;
+  else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
+    if (II->getIntrinsicID() == Intrinsic::fshl ||
+        II->getIntrinsicID() == Intrinsic::fshr)
+      ShiftAmountOpNum = 2;
+  }
+
+  if (ShiftAmountOpNum == -1)
+    return false;
+
+  auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
+  if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
+      isVectorShiftByScalarCheap(I->getType())) {
+    Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
+    return true;
   }
 
   return false;

diff --git a/llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll b/llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll
index 9536d540496d..26f8a68f544e 100644
--- a/llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-by-select-loop.ll
@@ -28,10 +28,10 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
 ; SSE-NEXT:    movl %eax, %edx
 ; SSE-NEXT:    andl $-32, %edx
 ; SSE-NEXT:    movd %r9d, %xmm0
-; SSE-NEXT:    movd %r8d, %xmm1
+; SSE-NEXT:    movd %r8d, %xmm2
 ; SSE-NEXT:    xorl %ecx, %ecx
-; SSE-NEXT:    pmovzxdq {{.*#+}} xmm14 = xmm1[0],zero,xmm1[1],zero
-; SSE-NEXT:    pmovzxdq {{.*#+}} xmm15 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm14 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm15 = xmm2[0],zero,xmm2[1],zero
 ; SSE-NEXT:    .p2align 4, 0x90
 ; SSE-NEXT:  .LBB0_4: # %vector.body
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
@@ -53,57 +53,57 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
 ; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm4[1,1,2,3]
 ; SSE-NEXT:    pmovsxbd %xmm3, %xmm2
 ; SSE-NEXT:    pcmpeqb %xmm1, %xmm5
-; SSE-NEXT:    pmovsxbd %xmm5, %xmm8
+; SSE-NEXT:    pmovsxbd %xmm5, %xmm9
 ; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,2,3]
-; SSE-NEXT:    pmovsxbd %xmm3, %xmm9
+; SSE-NEXT:    pmovsxbd %xmm3, %xmm10
 ; SSE-NEXT:    movdqu 16(%rdi,%rcx,4), %xmm3
 ; SSE-NEXT:    movdqa %xmm3, %xmm4
-; SSE-NEXT:    pslld %xmm15, %xmm4
-; SSE-NEXT:    pslld %xmm14, %xmm3
+; SSE-NEXT:    pslld %xmm14, %xmm4
+; SSE-NEXT:    pslld %xmm15, %xmm3
 ; SSE-NEXT:    blendvps %xmm0, %xmm4, %xmm3
-; SSE-NEXT:    movdqu (%rdi,%rcx,4), %xmm10
-; SSE-NEXT:    movdqa %xmm10, %xmm5
-; SSE-NEXT:    pslld %xmm15, %xmm5
-; SSE-NEXT:    pslld %xmm14, %xmm10
+; SSE-NEXT:    movdqu (%rdi,%rcx,4), %xmm8
+; SSE-NEXT:    movdqa %xmm8, %xmm5
+; SSE-NEXT:    pslld %xmm14, %xmm5
+; SSE-NEXT:    pslld %xmm15, %xmm8
 ; SSE-NEXT:    movdqa %xmm7, %xmm0
-; SSE-NEXT:    blendvps %xmm0, %xmm5, %xmm10
+; SSE-NEXT:    blendvps %xmm0, %xmm5, %xmm8
 ; SSE-NEXT:    movdqu 48(%rdi,%rcx,4), %xmm12
 ; SSE-NEXT:    movdqa %xmm12, %xmm5
-; SSE-NEXT:    pslld %xmm15, %xmm5
-; SSE-NEXT:    pslld %xmm14, %xmm12
+; SSE-NEXT:    pslld %xmm14, %xmm5
+; SSE-NEXT:    pslld %xmm15, %xmm12
 ; SSE-NEXT:    movdqa %xmm6, %xmm0
 ; SSE-NEXT:    blendvps %xmm0, %xmm5, %xmm12
 ; SSE-NEXT:    movdqu 32(%rdi,%rcx,4), %xmm6
 ; SSE-NEXT:    movdqa %xmm6, %xmm5
-; SSE-NEXT:    pslld %xmm15, %xmm5
-; SSE-NEXT:    pslld %xmm14, %xmm6
+; SSE-NEXT:    pslld %xmm14, %xmm5
+; SSE-NEXT:    pslld %xmm15, %xmm6
 ; SSE-NEXT:    movdqa %xmm13, %xmm0
 ; SSE-NEXT:    blendvps %xmm0, %xmm5, %xmm6
 ; SSE-NEXT:    movdqu 80(%rdi,%rcx,4), %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm5
-; SSE-NEXT:    pslld %xmm15, %xmm5
-; SSE-NEXT:    pslld %xmm14, %xmm1
+; SSE-NEXT:    pslld %xmm14, %xmm5
+; SSE-NEXT:    pslld %xmm15, %xmm1
 ; SSE-NEXT:    movdqa %xmm2, %xmm0
 ; SSE-NEXT:    blendvps %xmm0, %xmm5, %xmm1
 ; SSE-NEXT:    movdqu 64(%rdi,%rcx,4), %xmm5
 ; SSE-NEXT:    movdqa %xmm5, %xmm2
-; SSE-NEXT:    pslld %xmm15, %xmm2
-; SSE-NEXT:    pslld %xmm14, %xmm5
+; SSE-NEXT:    pslld %xmm14, %xmm2
+; SSE-NEXT:    pslld %xmm15, %xmm5
 ; SSE-NEXT:    movdqa %xmm11, %xmm0
 ; SSE-NEXT:    blendvps %xmm0, %xmm2, %xmm5
 ; SSE-NEXT:    movdqu 112(%rdi,%rcx,4), %xmm2
 ; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    pslld %xmm15, %xmm4
-; SSE-NEXT:    pslld %xmm14, %xmm2
-; SSE-NEXT:    movdqa %xmm9, %xmm0
+; SSE-NEXT:    pslld %xmm14, %xmm4
+; SSE-NEXT:    pslld %xmm15, %xmm2
+; SSE-NEXT:    movdqa %xmm10, %xmm0
 ; SSE-NEXT:    blendvps %xmm0, %xmm4, %xmm2
 ; SSE-NEXT:    movdqu 96(%rdi,%rcx,4), %xmm4
 ; SSE-NEXT:    movdqa %xmm4, %xmm7
-; SSE-NEXT:    pslld %xmm15, %xmm7
-; SSE-NEXT:    pslld %xmm14, %xmm4
-; SSE-NEXT:    movdqa %xmm8, %xmm0
+; SSE-NEXT:    pslld %xmm14, %xmm7
+; SSE-NEXT:    pslld %xmm15, %xmm4
+; SSE-NEXT:    movdqa %xmm9, %xmm0
 ; SSE-NEXT:    blendvps %xmm0, %xmm7, %xmm4
-; SSE-NEXT:    movups %xmm10, (%rdi,%rcx,4)
+; SSE-NEXT:    movups %xmm8, (%rdi,%rcx,4)
 ; SSE-NEXT:    movups %xmm3, 16(%rdi,%rcx,4)
 ; SSE-NEXT:    movups %xmm6, 32(%rdi,%rcx,4)
 ; SSE-NEXT:    movups %xmm12, 48(%rdi,%rcx,4)
@@ -139,7 +139,6 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
 ;
 ; AVX1-LABEL: vector_variable_shift_left_loop:
 ; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    subq $24, %rsp
 ; AVX1-NEXT:    testl %edx, %edx
 ; AVX1-NEXT:    jle .LBB0_9
 ; AVX1-NEXT:  # %bb.1: # %for.body.preheader
@@ -156,88 +155,86 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
 ; AVX1-NEXT:    vmovd %r9d, %xmm0
 ; AVX1-NEXT:    vmovd %r8d, %xmm1
 ; AVX1-NEXT:    xorl %ecx, %ecx
-; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
 ; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
 ; AVX1-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm13 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm14 = xmm1[0],zero,xmm1[1],zero
 ; AVX1-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm15 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT:    vpxor %xmm11, %xmm11, %xmm11
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm11 = xmm1[0],zero,xmm1[1],zero
 ; AVX1-NEXT:    .p2align 4, 0x90
 ; AVX1-NEXT:  .LBB0_4: # %vector.body
 ; AVX1-NEXT:    # =>This Inner Loop Header: Depth=1
-; AVX1-NEXT:    vpmovzxdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm1 = mem[0],zero,mem[1],zero
-; AVX1-NEXT:    vpmovzxdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX1-NEXT:    # xmm2 = mem[0],zero,mem[1],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX1-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX1-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
-; AVX1-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
-; AVX1-NEXT:    vmovq {{.*#+}} xmm6 = mem[0],zero
-; AVX1-NEXT:    vpcmpeqb %xmm3, %xmm11, %xmm3
-; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm7
+; AVX1-NEXT:    vpxor %xmm12, %xmm12, %xmm12
+; AVX1-NEXT:    vpcmpeqb %xmm1, %xmm12, %xmm1
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm1, %xmm1
+; AVX1-NEXT:    vpcmpeqb %xmm2, %xmm12, %xmm2
+; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm6
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpeqb %xmm3, %xmm12, %xmm3
+; AVX1-NEXT:    vpmovzxdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm7 = mem[0],zero,mem[1],zero
+; AVX1-NEXT:    vmovdqu (%rdi,%rcx,4), %xmm8
+; AVX1-NEXT:    vpslld %xmm7, %xmm8, %xmm9
+; AVX1-NEXT:    vpmovzxdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX1-NEXT:    # xmm10 = mem[0],zero,mem[1],zero
+; AVX1-NEXT:    vpslld %xmm10, %xmm8, %xmm0
+; AVX1-NEXT:    vblendvps %xmm5, %xmm9, %xmm0, %xmm8
+; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm5
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
 ; AVX1-NEXT:    vpmovsxbd %xmm3, %xmm3
-; AVX1-NEXT:    vpcmpeqb %xmm4, %xmm11, %xmm4
-; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm8
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
-; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm4
-; AVX1-NEXT:    vpcmpeqb %xmm5, %xmm11, %xmm5
-; AVX1-NEXT:    vmovdqu (%rdi,%rcx,4), %xmm9
-; AVX1-NEXT:    vpslld %xmm2, %xmm9, %xmm10
-; AVX1-NEXT:    vpslld %xmm1, %xmm9, %xmm0
-; AVX1-NEXT:    vblendvps %xmm7, %xmm10, %xmm0, %xmm9
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm7
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
-; AVX1-NEXT:    vpmovsxbd %xmm5, %xmm5
-; AVX1-NEXT:    vpcmpeqb %xmm6, %xmm11, %xmm6
+; AVX1-NEXT:    vpcmpeqb %xmm4, %xmm12, %xmm4
 ; AVX1-NEXT:    vmovdqu 16(%rdi,%rcx,4), %xmm0
-; AVX1-NEXT:    vpslld %xmm2, %xmm0, %xmm2
-; AVX1-NEXT:    vpslld %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
-; AVX1-NEXT:    vpmovsxbd %xmm6, %xmm6
-; AVX1-NEXT:    vblendvps %xmm3, %xmm2, %xmm0, %xmm10
-; AVX1-NEXT:    vmovdqu 32(%rdi,%rcx,4), %xmm2
-; AVX1-NEXT:    vpslld %xmm15, %xmm2, %xmm3
-; AVX1-NEXT:    vpslld %xmm14, %xmm2, %xmm2
-; AVX1-NEXT:    vblendvps %xmm8, %xmm3, %xmm2, %xmm8
-; AVX1-NEXT:    vmovdqu 48(%rdi,%rcx,4), %xmm3
-; AVX1-NEXT:    vpslld %xmm15, %xmm3, %xmm0
-; AVX1-NEXT:    vpslld %xmm14, %xmm3, %xmm3
-; AVX1-NEXT:    vblendvps %xmm4, %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vmovdqu 64(%rdi,%rcx,4), %xmm3
-; AVX1-NEXT:    vpslld %xmm13, %xmm3, %xmm4
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vpslld %xmm2, %xmm3, %xmm3
-; AVX1-NEXT:    vblendvps %xmm7, %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vmovdqu 80(%rdi,%rcx,4), %xmm4
-; AVX1-NEXT:    vpslld %xmm13, %xmm4, %xmm7
-; AVX1-NEXT:    vpslld %xmm2, %xmm4, %xmm4
-; AVX1-NEXT:    vblendvps %xmm5, %xmm7, %xmm4, %xmm4
-; AVX1-NEXT:    vmovdqu 96(%rdi,%rcx,4), %xmm5
-; AVX1-NEXT:    vpslld %xmm12, %xmm5, %xmm7
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-NEXT:    vpslld %xmm2, %xmm5, %xmm5
-; AVX1-NEXT:    vblendvps %xmm1, %xmm7, %xmm5, %xmm1
-; AVX1-NEXT:    vmovdqu 112(%rdi,%rcx,4), %xmm5
-; AVX1-NEXT:    vpslld %xmm12, %xmm5, %xmm7
-; AVX1-NEXT:    vpslld %xmm2, %xmm5, %xmm5
-; AVX1-NEXT:    vblendvps %xmm6, %xmm7, %xmm5, %xmm5
-; AVX1-NEXT:    vmovups %xmm9, (%rdi,%rcx,4)
+; AVX1-NEXT:    vpslld %xmm7, %xmm0, %xmm7
+; AVX1-NEXT:    vpslld %xmm10, %xmm0, %xmm0
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm9
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; AVX1-NEXT:    vpmovsxbd %xmm4, %xmm12
+; AVX1-NEXT:    vblendvps %xmm1, %xmm7, %xmm0, %xmm10
+; AVX1-NEXT:    vmovdqu 32(%rdi,%rcx,4), %xmm1
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT:    vpslld %xmm0, %xmm1, %xmm7
+; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-NEXT:    vpslld %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvps %xmm6, %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqu 48(%rdi,%rcx,4), %xmm6
+; AVX1-NEXT:    vpslld %xmm0, %xmm6, %xmm7
+; AVX1-NEXT:    vpslld %xmm4, %xmm6, %xmm6
+; AVX1-NEXT:    vblendvps %xmm2, %xmm7, %xmm6, %xmm2
+; AVX1-NEXT:    vmovdqu 64(%rdi,%rcx,4), %xmm6
+; AVX1-NEXT:    vpslld %xmm13, %xmm6, %xmm7
+; AVX1-NEXT:    vpslld %xmm14, %xmm6, %xmm6
+; AVX1-NEXT:    vblendvps %xmm5, %xmm7, %xmm6, %xmm5
+; AVX1-NEXT:    vmovdqu 80(%rdi,%rcx,4), %xmm6
+; AVX1-NEXT:    vpslld %xmm13, %xmm6, %xmm7
+; AVX1-NEXT:    vpslld %xmm14, %xmm6, %xmm6
+; AVX1-NEXT:    vblendvps %xmm3, %xmm7, %xmm6, %xmm3
+; AVX1-NEXT:    vmovdqu 96(%rdi,%rcx,4), %xmm6
+; AVX1-NEXT:    vpslld %xmm15, %xmm6, %xmm7
+; AVX1-NEXT:    vpslld %xmm11, %xmm6, %xmm6
+; AVX1-NEXT:    vblendvps %xmm9, %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vmovdqu 112(%rdi,%rcx,4), %xmm7
+; AVX1-NEXT:    vpslld %xmm15, %xmm7, %xmm0
+; AVX1-NEXT:    vpslld %xmm11, %xmm7, %xmm7
+; AVX1-NEXT:    vblendvps %xmm12, %xmm0, %xmm7, %xmm0
+; AVX1-NEXT:    vmovups %xmm8, (%rdi,%rcx,4)
 ; AVX1-NEXT:    vmovups %xmm10, 16(%rdi,%rcx,4)
-; AVX1-NEXT:    vmovups %xmm8, 32(%rdi,%rcx,4)
-; AVX1-NEXT:    vmovups %xmm0, 48(%rdi,%rcx,4)
-; AVX1-NEXT:    vmovups %xmm3, 64(%rdi,%rcx,4)
-; AVX1-NEXT:    vmovups %xmm4, 80(%rdi,%rcx,4)
-; AVX1-NEXT:    vmovups %xmm1, 96(%rdi,%rcx,4)
-; AVX1-NEXT:    vmovups %xmm5, 112(%rdi,%rcx,4)
+; AVX1-NEXT:    vmovups %xmm1, 32(%rdi,%rcx,4)
+; AVX1-NEXT:    vmovups %xmm2, 48(%rdi,%rcx,4)
+; AVX1-NEXT:    vmovups %xmm5, 64(%rdi,%rcx,4)
+; AVX1-NEXT:    vmovups %xmm3, 80(%rdi,%rcx,4)
+; AVX1-NEXT:    vmovups %xmm6, 96(%rdi,%rcx,4)
+; AVX1-NEXT:    vmovups %xmm0, 112(%rdi,%rcx,4)
 ; AVX1-NEXT:    addq $32, %rcx
 ; AVX1-NEXT:    cmpq %rcx, %rdx
 ; AVX1-NEXT:    jne .LBB0_4
@@ -245,7 +242,6 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
 ; AVX1-NEXT:    cmpq %rax, %rdx
 ; AVX1-NEXT:    jne .LBB0_6
 ; AVX1-NEXT:  .LBB0_9: # %for.cond.cleanup
-; AVX1-NEXT:    addq $24, %rsp
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ; AVX1-NEXT:    .p2align 4, 0x90
@@ -553,28 +549,23 @@ define void @vector_variable_shift_left_loop_simpler(i32* nocapture %arr, i8* no
 ; SSE-NEXT:    movl %edx, %eax
 ; SSE-NEXT:    andl $-4, %eax
 ; SSE-NEXT:    movd %ecx, %xmm0
-; SSE-NEXT:    movd %r8d, %xmm2
-; SSE-NEXT:    movd %r9d, %xmm3
+; SSE-NEXT:    movd %r8d, %xmm3
+; SSE-NEXT:    movd %r9d, %xmm1
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; SSE-NEXT:    xorl %ecx, %ecx
-; SSE-NEXT:    pslld $23, %xmm0
-; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [1065353216,1065353216,1065353216,1065353216]
-; SSE-NEXT:    paddd %xmm4, %xmm0
-; SSE-NEXT:    cvttps2dq %xmm0, %xmm0
-; SSE-NEXT:    pmulld %xmm3, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
-; SSE-NEXT:    pslld $23, %xmm2
-; SSE-NEXT:    paddd %xmm4, %xmm2
-; SSE-NEXT:    cvttps2dq %xmm2, %xmm0
-; SSE-NEXT:    pmulld %xmm3, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[0,0,0,0]
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    pslld %xmm0, %xmm2
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm3[0],zero,xmm3[1],zero
+; SSE-NEXT:    pslld %xmm0, %xmm1
 ; SSE-NEXT:    pxor %xmm3, %xmm3
 ; SSE-NEXT:    .p2align 4, 0x90
 ; SSE-NEXT:  .LBB1_2: # %vector.body
 ; SSE-NEXT:    # =>This Inner Loop Header: Depth=1
 ; SSE-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
 ; SSE-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE-NEXT:    movdqa %xmm2, %xmm4
-; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm4
+; SSE-NEXT:    movdqa %xmm1, %xmm4
+; SSE-NEXT:    blendvps %xmm0, %xmm2, %xmm4
 ; SSE-NEXT:    movups %xmm4, (%rdi,%rcx,4)
 ; SSE-NEXT:    addq $4, %rcx
 ; SSE-NEXT:    cmpq %rcx, %rax
@@ -592,18 +583,12 @@ define void @vector_variable_shift_left_loop_simpler(i32* nocapture %arr, i8* no
 ; AVX1-NEXT:    vmovd %ecx, %xmm0
 ; AVX1-NEXT:    vmovd %r8d, %xmm1
 ; AVX1-NEXT:    vmovd %r9d, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
 ; AVX1-NEXT:    xorl %ecx, %ecx
-; AVX1-NEXT:    vpslld $23, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
-; AVX1-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vcvttps2dq %xmm0, %xmm0
-; AVX1-NEXT:    vpmulld %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX1-NEXT:    vpslld $23, %xmm1, %xmm1
-; AVX1-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vcvttps2dq %xmm1, %xmm1
-; AVX1-NEXT:    vpmulld %xmm1, %xmm2, %xmm1
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX1-NEXT:    vpslld %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX1-NEXT:    vpslld %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    .p2align 4, 0x90
 ; AVX1-NEXT:  .LBB1_2: # %vector.body

diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/vec-shift.ll b/llvm/test/Transforms/CodeGenPrepare/X86/vec-shift.ll
index d5711c6bb0b9..5779bdaa0922 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/vec-shift.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/vec-shift.ll
@@ -114,26 +114,26 @@ define void @vector_variable_shift_left_loop(i32* nocapture %arr, i8* nocapture
 ; AVX1-NEXT:    [[SPLATINSERT20:%.*]] = insertelement <4 x i32> undef, i32 [[AMT1:%.*]], i32 0
 ; AVX1-NEXT:    [[SPLAT2:%.*]] = shufflevector <4 x i32> [[SPLATINSERT20]], <4 x i32> undef, <4 x i32> zeroinitializer
 ; AVX1-NEXT:    [[SPLATINSERT22:%.*]] = insertelement <4 x i32> undef, i32 [[X:%.*]], i32 0
+; AVX1-NEXT:    [[SPLAT3:%.*]] = shufflevector <4 x i32> [[SPLATINSERT22]], <4 x i32> undef, <4 x i32> zeroinitializer
 ; AVX1-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; AVX1:       vector.body:
 ; AVX1-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; AVX1-NEXT:    [[TMP0:%.*]] = shufflevector <4 x i32> [[SPLATINSERT20]], <4 x i32> undef, <4 x i32> zeroinitializer
-; AVX1-NEXT:    [[TMP1:%.*]] = shufflevector <4 x i32> [[SPLATINSERT18]], <4 x i32> undef, <4 x i32> zeroinitializer
-; AVX1-NEXT:    [[TMP2:%.*]] = shufflevector <4 x i32> [[SPLATINSERT22]], <4 x i32> undef, <4 x i32> zeroinitializer
-; AVX1-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, i8* [[CONTROL:%.*]], i64 [[INDEX]]
-; AVX1-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP3]] to <4 x i8>*
-; AVX1-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP4]], align 1
-; AVX1-NEXT:    [[TMP5:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], zeroinitializer
-; AVX1-NEXT:    [[TMP6:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[SPLAT1]], <4 x i32> [[SPLAT2]]
-; AVX1-NEXT:    [[TMP7:%.*]] = shl <4 x i32> [[TMP2]], [[TMP1]]
-; AVX1-NEXT:    [[TMP8:%.*]] = shl <4 x i32> [[TMP2]], [[TMP0]]
-; AVX1-NEXT:    [[TMP9:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[TMP7]], <4 x i32> [[TMP8]]
-; AVX1-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[ARR:%.*]], i64 [[INDEX]]
-; AVX1-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
-; AVX1-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* [[TMP11]], align 4
+; AVX1-NEXT:    [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[CONTROL:%.*]], i64 [[INDEX]]
+; AVX1-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <4 x i8>*
+; AVX1-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP1]], align 1
+; AVX1-NEXT:    [[TMP2:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], zeroinitializer
+; AVX1-NEXT:    [[TMP3:%.*]] = select <4 x i1> [[TMP2]], <4 x i32> [[SPLAT1]], <4 x i32> [[SPLAT2]]
+; AVX1-NEXT:    [[TMP4:%.*]] = shufflevector <4 x i32> [[SPLATINSERT18]], <4 x i32> undef, <4 x i32> zeroinitializer
+; AVX1-NEXT:    [[TMP5:%.*]] = shl <4 x i32> [[SPLAT3]], [[TMP4]]
+; AVX1-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i32> [[SPLATINSERT20]], <4 x i32> undef, <4 x i32> zeroinitializer
+; AVX1-NEXT:    [[TMP7:%.*]] = shl <4 x i32> [[SPLAT3]], [[TMP6]]
+; AVX1-NEXT:    [[TMP8:%.*]] = select <4 x i1> [[TMP2]], <4 x i32> [[TMP5]], <4 x i32> [[TMP7]]
+; AVX1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[ARR:%.*]], i64 [[INDEX]]
+; AVX1-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
+; AVX1-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* [[TMP10]], align 4
 ; AVX1-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; AVX1-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; AVX1-NEXT:    br i1 [[TMP12]], label [[EXIT]], label [[VECTOR_BODY]]
+; AVX1-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; AVX1-NEXT:    br i1 [[TMP11]], label [[EXIT]], label [[VECTOR_BODY]]
 ; AVX1:       exit:
 ; AVX1-NEXT:    ret void
 ;
