[llvm] 10c256c - Revert "[X86] Fold shuffle(not(x),undef) -> not(shuffle(x,undef))"

Benjamin Kramer via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 2 02:29:04 PST 2021


Author: Benjamin Kramer
Date: 2021-03-02T11:24:07+01:00
New Revision: 10c256ccaf520eed766f594f897b5bafdc8061ae

URL: https://github.com/llvm/llvm-project/commit/10c256ccaf520eed766f594f897b5bafdc8061ae
DIFF: https://github.com/llvm/llvm-project/commit/10c256ccaf520eed766f594f897b5bafdc8061ae.diff

LOG: Revert "[X86] Fold shuffle(not(x),undef) -> not(shuffle(x,undef))"

This reverts commit 925093d88ae74560a8e94cf66f95d60ea3ffa2d3.

The fold causes an infinite loop when compiling some shuffles:

$ cat bugpoint-reduced-simplified.ll
target triple = "x86_64-unknown-linux-gnu"

define void @foo() {
entry:
  %0 = load i8, i8* undef, align 1
  %broadcast.splatinsert = insertelement <16 x i8> poison, i8 %0, i32 0
  %1 = icmp ne <16 x i8> %broadcast.splatinsert, zeroinitializer
  %2 = shufflevector <16 x i1> %1, <16 x i1> undef, <16 x i32> zeroinitializer
  %wide.load = load <16 x i8>, <16 x i8>* undef, align 1
  %3 = icmp ne <16 x i8> %wide.load, zeroinitializer
  %4 = and <16 x i1> %3, %2
  %5 = zext <16 x i1> %4 to <16 x i8>
  store <16 x i8> %5, <16 x i8>* undef, align 1
  ret void
}

$ llc < bugpoint-reduced-simplified.ll
<timeout>
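
For context, the reverted combine rewrote shuffle(not(x),undef) into not(shuffle(x,undef)), presumably so that later folds could recognize the NOT through the shuffle (the bitselect test changes below show vpcmov/vandnps patterns being lost again once the fold is removed). A rough IR-level illustration of the equivalence the fold relied on; the combine itself runs on SelectionDAG nodes, and the function names and the <4 x i32> type here are illustrative only, not taken from the commit:

define <4 x i32> @shuffle_of_not(<4 x i32> %x) {
  ; shuffle(not(x), undef): the shape the reverted combine matched
  %not = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %shuf = shufflevector <4 x i32> %not, <4 x i32> undef, <4 x i32> zeroinitializer
  ret <4 x i32> %shuf
}

define <4 x i32> @not_of_shuffle(<4 x i32> %x) {
  ; not(shuffle(x, undef)): the equivalent shape the combine produced instead
  %shuf = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> zeroinitializer
  %not = xor <4 x i32> %shuf, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %not
}

A plausible way the hang arises, though this log does not spell it out, is that another combine prefers the original shuffle(not(x)) shape and keeps undoing the rewrite, so the DAG combiner alternates between the two forms without reaching a fixed point.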

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/combine-bitselect.ll
    llvm/test/CodeGen/X86/promote-cmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2e9022205801..86052fad5721 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -38003,19 +38003,6 @@ static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
 
     if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
       return HAddSub;
-
-    // Fold shuffle(not(x),undef) -> not(shuffle(x,undef)).
-    if (N->getOpcode() == ISD::VECTOR_SHUFFLE &&
-        N->getOperand(0).getOpcode() == ISD::XOR &&
-        N->getOperand(1).isUndef() &&
-        N->isOnlyUserOf(N->getOperand(0).getNode())) {
-      if (SDValue Not = IsNOT(N->getOperand(0), DAG, true)) {
-        SDValue NewShuffle = DAG.getVectorShuffle(
-            VT, dl, DAG.getBitcast(VT, Not), DAG.getUNDEF(VT),
-            cast<ShuffleVectorSDNode>(N)->getMask());
-        return DAG.getNOT(dl, NewShuffle, VT);
-      }
-    }
   }
 
   // Attempt to combine into a vector load/broadcast.

diff --git a/llvm/test/CodeGen/X86/combine-bitselect.ll b/llvm/test/CodeGen/X86/combine-bitselect.ll
index 5c8b8f691fb8..d57bd877500c 100644
--- a/llvm/test/CodeGen/X86/combine-bitselect.ll
+++ b/llvm/test/CodeGen/X86/combine-bitselect.ll
@@ -505,18 +505,26 @@ define <4 x i64> @bitselect_v4i64_broadcast_rrr(<4 x i64> %a0, <4 x i64> %a1, i6
 ; XOP-LABEL: bitselect_v4i64_broadcast_rrr:
 ; XOP:       # %bb.0:
 ; XOP-NEXT:    vmovq %rdi, %xmm2
+; XOP-NEXT:    vmovq %rdi, %xmm3
 ; XOP-NEXT:    vmovddup {{.*#+}} xmm2 = xmm2[0,0]
 ; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
-; XOP-NEXT:    vpcmov %ymm2, %ymm1, %ymm0, %ymm0
+; XOP-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,1]
+; XOP-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
+; XOP-NEXT:    vandps %ymm2, %ymm0, %ymm0
+; XOP-NEXT:    vandnps %ymm1, %ymm3, %ymm1
+; XOP-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX1-LABEL: bitselect_v4i64_broadcast_rrr:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovq %rdi, %xmm2
+; AVX1-NEXT:    vmovq %rdi, %xmm3
 ; AVX1-NEXT:    vmovddup {{.*#+}} xmm2 = xmm2[0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm2, %ymm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm3
 ; AVX1-NEXT:    vandps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT:    vandnps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandnps %ymm1, %ymm3, %ymm1
 ; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
@@ -873,22 +881,32 @@ define <8 x i64> @bitselect_v8i64_broadcast_rrr(<8 x i64> %a0, <8 x i64> %a1, i6
 ; XOP-LABEL: bitselect_v8i64_broadcast_rrr:
 ; XOP:       # %bb.0:
 ; XOP-NEXT:    vmovq %rdi, %xmm4
+; XOP-NEXT:    vmovq %rdi, %xmm5
 ; XOP-NEXT:    vmovddup {{.*#+}} xmm4 = xmm4[0,0]
 ; XOP-NEXT:    vinsertf128 $1, %xmm4, %ymm4, %ymm4
-; XOP-NEXT:    vpcmov %ymm4, %ymm2, %ymm0, %ymm0
-; XOP-NEXT:    vpcmov %ymm4, %ymm3, %ymm1, %ymm1
+; XOP-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,0,1]
+; XOP-NEXT:    vinsertf128 $1, %xmm5, %ymm5, %ymm5
+; XOP-NEXT:    vandps %ymm4, %ymm1, %ymm1
+; XOP-NEXT:    vandps %ymm4, %ymm0, %ymm0
+; XOP-NEXT:    vandnps %ymm3, %ymm5, %ymm3
+; XOP-NEXT:    vorps %ymm3, %ymm1, %ymm1
+; XOP-NEXT:    vandnps %ymm2, %ymm5, %ymm2
+; XOP-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; XOP-NEXT:    retq
 ;
 ; AVX1-LABEL: bitselect_v8i64_broadcast_rrr:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovq %rdi, %xmm4
+; AVX1-NEXT:    vmovq %rdi, %xmm5
 ; AVX1-NEXT:    vmovddup {{.*#+}} xmm4 = xmm4[0,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm4, %ymm4
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm5, %ymm5
 ; AVX1-NEXT:    vandps %ymm4, %ymm1, %ymm1
 ; AVX1-NEXT:    vandps %ymm4, %ymm0, %ymm0
-; AVX1-NEXT:    vandnps %ymm3, %ymm4, %ymm3
+; AVX1-NEXT:    vandnps %ymm3, %ymm5, %ymm3
 ; AVX1-NEXT:    vorps %ymm3, %ymm1, %ymm1
-; AVX1-NEXT:    vandnps %ymm2, %ymm4, %ymm2
+; AVX1-NEXT:    vandnps %ymm2, %ymm5, %ymm2
 ; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;

diff --git a/llvm/test/CodeGen/X86/promote-cmp.ll b/llvm/test/CodeGen/X86/promote-cmp.ll
index 1350b2b9e065..c59f808a3029 100644
--- a/llvm/test/CodeGen/X86/promote-cmp.ll
+++ b/llvm/test/CodeGen/X86/promote-cmp.ll
@@ -47,16 +47,17 @@ define <4 x i64> @PR45808(<4 x i64> %0, <4 x i64> %1) {
 ; SSE4-LABEL: PR45808:
 ; SSE4:       # %bb.0:
 ; SSE4-NEXT:    movdqa %xmm0, %xmm4
-; SSE4-NEXT:    movdqa %xmm0, %xmm5
-; SSE4-NEXT:    pcmpgtq %xmm2, %xmm5
 ; SSE4-NEXT:    movdqa %xmm1, %xmm0
 ; SSE4-NEXT:    pcmpgtq %xmm3, %xmm0
+; SSE4-NEXT:    movdqa %xmm4, %xmm5
+; SSE4-NEXT:    pcmpgtq %xmm2, %xmm5
+; SSE4-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
 ; SSE4-NEXT:    pcmpeqd %xmm6, %xmm6
-; SSE4-NEXT:    pxor %xmm6, %xmm5
+; SSE4-NEXT:    pxor %xmm5, %xmm6
 ; SSE4-NEXT:    psllq $63, %xmm0
 ; SSE4-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
-; SSE4-NEXT:    psllq $63, %xmm5
-; SSE4-NEXT:    movdqa %xmm5, %xmm0
+; SSE4-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm6[0],zero,xmm6[1],zero
+; SSE4-NEXT:    psllq $63, %xmm0
 ; SSE4-NEXT:    blendvpd %xmm0, %xmm4, %xmm2
 ; SSE4-NEXT:    movapd %xmm2, %xmm0
 ; SSE4-NEXT:    movapd %xmm3, %xmm1

