[llvm] r323672 - [X86] Don't create SHRUNKBLEND when the condition is used by the true or false operand of the vselect.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 29 09:56:57 PST 2018


Author: ctopper
Date: Mon Jan 29 09:56:57 2018
New Revision: 323672

URL: http://llvm.org/viewvc/llvm-project?rev=323672&view=rev
Log:
[X86] Don't create SHRUNKBLEND when the condition is used by the true or false operand of the vselect.

Fixes PR34592.

Differential Revision: https://reviews.llvm.org/D42628
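
For context, here is a minimal sketch of the shape this change guards against. This is a hypothetical reduction, not the committed reproducer (that lives in test/CodeGen/X86/pr34592.ll): once the select is lowered, the sign-extended mask plausibly becomes both the VSELECT condition and its true operand, so a SHRUNKBLEND that only keeps the condition's per-element sign bits well-defined would corrupt the value flowing through the other use.

  ; Hypothetical IR reduction: after lowering, %ext is likely CSE'd with the
  ; widened blend mask, so it is both the condition and the true value, and
  ; every bit of it is observable, not just the sign bits.
  define <4 x i64> @mask_also_value(<4 x i64> %x, <4 x i64> %y) {
    %cond = icmp slt <4 x i64> %x, zeroinitializer
    %ext = sext <4 x i1> %cond to <4 x i64>
    %sel = select <4 x i1> %cond, <4 x i64> %ext, <4 x i64> %y
    ret <4 x i64> %sel
  }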

Modified:
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/test/CodeGen/X86/pr34592.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=323672&r1=323671&r2=323672&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Jan 29 09:56:57 2018
@@ -31828,9 +31828,10 @@ static SDValue combineSelect(SDNode *N,
         // Check all uses of the condition operand to check whether it will be
         // consumed by non-BLEND instructions. Those may require that all bits
         // are set properly.
-        for (SDNode *U : Cond->uses()) {
+        for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
+             UI != UE; ++UI) {
           // TODO: Add other opcodes eventually lowered into BLEND.
-          if (U->getOpcode() != ISD::VSELECT)
+          if (UI->getOpcode() != ISD::VSELECT || UI.getOperandNo() != 0)
             return SDValue();
         }
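
The gist of the new guard, as a standalone sketch with toy types (not LLVM's SDNode API): previously any VSELECT user of the condition was accepted; now the condition must also sit in the condition slot (operand 0) of each such user, since X86ISD::SHRUNKBLEND only promises the sign bit of each condition element.

  // Standalone sketch of the check; Node and onlyUsedAsBlendCondition are
  // hypothetical stand-ins for LLVM's SDNode and the loop in combineSelect.
  #include <cstddef>
  #include <vector>

  enum Opcode { VSELECT, OTHER };

  struct Node {
    Opcode Op;
    std::vector<const Node *> Operands; // for VSELECT: {cond, true, false}
  };

  // Returns true only if every use of Cond is a VSELECT that consumes it
  // strictly as the condition (operand 0). Any other use may read bits of
  // Cond beyond the per-element sign bit, making SHRUNKBLEND unsafe.
  bool onlyUsedAsBlendCondition(const Node *Cond,
                                const std::vector<const Node *> &Uses) {
    for (const Node *U : Uses) {
      if (U->Op != VSELECT)
        return false; // consumed by a non-blend instruction
      for (std::size_t I = 1; I < U->Operands.size(); ++I)
        if (U->Operands[I] == Cond)
          return false; // condition also feeds a value operand (PR34592)
    }
    return true;
  }

The committed code expresses the same thing more directly: LLVM's use_iterator visits each use edge and records its operand number, so a single UI.getOperandNo() != 0 check rejects any edge where the condition feeds a value operand.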
 

Modified: llvm/trunk/test/CodeGen/X86/pr34592.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pr34592.ll?rev=323672&r1=323671&r2=323672&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pr34592.ll (original)
+++ llvm/trunk/test/CodeGen/X86/pr34592.ll Mon Jan 29 09:56:57 2018
@@ -10,7 +10,7 @@ define <16 x i64> @pluto(<16 x i64> %arg
 ; CHECK-NEXT:    movq %rsp, %rbp
 ; CHECK-NEXT:    .cfi_def_cfa_register %rbp
 ; CHECK-NEXT:    andq $-32, %rsp
-; CHECK-NEXT:    subq $128, %rsp
+; CHECK-NEXT:    subq $288, %rsp # imm = 0x120
 ; CHECK-NEXT:    vmovaps 240(%rbp), %ymm8
 ; CHECK-NEXT:    vmovaps 208(%rbp), %ymm9
 ; CHECK-NEXT:    vmovaps 176(%rbp), %ymm10
@@ -23,39 +23,42 @@ define <16 x i64> @pluto(<16 x i64> %arg
 ; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [0,0,18446744071562067968,18446744071562067968]
 ; CHECK-NEXT:    vblendvpd %ymm0, %ymm2, %ymm6, %ymm0
 ; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vblendvpd %ymm2, %ymm3, %ymm7, %ymm3
-; CHECK-NEXT:    vblendvpd %ymm2, %ymm1, %ymm5, %ymm1
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm6 = ymm14[0,1],ymm2[2,3,4,5],ymm14[6,7]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm7 = ymm15[0,1],ymm11[2,3,4,5,6,7]
-; CHECK-NEXT:    vblendvpd %ymm2, %ymm10, %ymm6, %ymm6
-; CHECK-NEXT:    vmovaps {{.*#+}} ymm10 = [18446744071562067968,18446744071562067968,0,0]
-; CHECK-NEXT:    vblendvpd %ymm10, %ymm9, %ymm5, %ymm5
-; CHECK-NEXT:    vblendvpd %ymm2, %ymm8, %ymm2, %ymm2
-; CHECK-NEXT:    vpshufd {{.*#+}} ymm8 = ymm1[0,1,0,1,4,5,4,5]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm9 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
-; CHECK-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,1,1,3]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6,7]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm13[2,3],ymm2[4,5,6,7]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm8[2,3,4,5,6,7]
+; CHECK-NEXT:    vmovaps {{.*#+}} ymm8 = [18446744071562067968,18446744071562067968,0,0]
+; CHECK-NEXT:    vblendvpd %ymm8, %ymm9, %ymm6, %ymm6
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0,1],ymm11[2,3,4,5,6,7]
+; CHECK-NEXT:    vpalignr {{.*#+}} ymm8 = ymm0[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm0[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
+; CHECK-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,0]
+; CHECK-NEXT:    vmovaps %xmm6, %xmm9
+; CHECK-NEXT:    # implicit-def: %ymm11
+; CHECK-NEXT:    vinserti128 $1, %xmm9, %ymm11, %ymm11
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5],ymm8[6,7]
 ; CHECK-NEXT:    vmovaps %xmm0, %xmm9
-; CHECK-NEXT:    # implicit-def: %ymm10
-; CHECK-NEXT:    vinserti128 $1, %xmm9, %ymm10, %ymm10
-; CHECK-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[2],ymm2[2]
-; CHECK-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
-; CHECK-NEXT:    vpalignr {{.*#+}} ymm1 = ymm5[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm5[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
-; CHECK-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,3]
-; CHECK-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm6[0],ymm3[0],ymm6[2],ymm3[2]
-; CHECK-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; CHECK-NEXT:    vpalignr {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],ymm0[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
-; CHECK-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,0]
-; CHECK-NEXT:    vmovaps %xmm5, %xmm9
-; CHECK-NEXT:    # implicit-def: %ymm1
-; CHECK-NEXT:    vinserti128 $1, %xmm9, %ymm1, %ymm1
-; CHECK-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; CHECK-NEXT:    vmovaps %ymm8, %ymm1
+; CHECK-NEXT:    # implicit-def: %ymm0
+; CHECK-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm0
+; CHECK-NEXT:    vpunpcklqdq {{.*#+}} ymm11 = ymm7[0],ymm2[0],ymm7[2],ymm2[2]
+; CHECK-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,1,2,3]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm0[4,5,6,7]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3],ymm7[4,5,6,7]
+; CHECK-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,1,3]
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm11 = ymm5[0,1,0,1,4,5,4,5]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm11[6,7]
+; CHECK-NEXT:    vpalignr {{.*#+}} ymm5 = ymm6[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
+; CHECK-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,3]
+; CHECK-NEXT:    vpslldq {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,ymm7[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[16,17,18,19,20,21,22,23]
+; CHECK-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; CHECK-NEXT:    vmovaps %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT:    vmovaps %ymm8, %ymm0
+; CHECK-NEXT:    vmovaps %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT:    vmovaps %ymm2, %ymm1
+; CHECK-NEXT:    vmovaps {{[0-9]+}}(%rsp), %ymm2 # 32-byte Reload
+; CHECK-NEXT:    vmovaps %ymm3, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT:    vmovaps %ymm5, %ymm3
+; CHECK-NEXT:    vmovaps %ymm10, {{[0-9]+}}(%rsp) # 32-byte Spill
+; CHECK-NEXT:    vmovaps %ymm12, {{[0-9]+}}(%rsp) # 32-byte Spill
 ; CHECK-NEXT:    vmovaps %ymm4, {{[0-9]+}}(%rsp) # 32-byte Spill
-; CHECK-NEXT:    vmovaps %ymm12, (%rsp) # 32-byte Spill
+; CHECK-NEXT:    vmovaps %ymm14, (%rsp) # 32-byte Spill
 ; CHECK-NEXT:    movq %rbp, %rsp
 ; CHECK-NEXT:    popq %rbp
 ; CHECK-NEXT:    retq



