[llvm] r324129 - Partially revert r324124 [X86] Add tests for missed opportunities to use ptest for all ones comparison.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 2 12:12:44 PST 2018


Author: ctopper
Date: Fri Feb  2 12:12:44 2018
New Revision: 324129

URL: http://llvm.org/viewvc/llvm-project?rev=324129&view=rev
Log:
Partially revert r324124 [X86] Add tests for missed opportunities to use ptest for all ones comparison.

Turns out I misunderstood the flag behavior of PTEST: I read the documentation for KORTEST, which is different from PTEST/KTEST, and made a bad assumption.

Keep the test rename, though, since that's useful.
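
For reference, the relevant flag semantics (paraphrased from my reading of the Intel SDM; the helper names below are illustrative, not LLVM code): PTEST and KTEST compute ZF from the AND of their two operands and CF from src AND NOT dst, while KORTEST computes both flags from the OR of its two mask operands and sets CF only when that OR is all ones. A minimal C model of the difference:

  #include <stdbool.h>
  #include <stdint.h>

  /* Simplified model of the PTEST/KTEST flag computation (operands narrowed
   * to 64 bits for illustration): ZF from dst AND src, CF from src AND NOT dst. */
  static bool ptest_zf(uint64_t dst, uint64_t src) { return (dst & src) == 0; }
  static bool ptest_cf(uint64_t dst, uint64_t src) { return (src & ~dst) == 0; }

  /* Model of the KORTESTW flag computation: both flags come from k1 OR k2,
   * and CF is set only when that OR is all ones across the 16-bit mask. */
  static bool kortest_zf(uint16_t k1, uint16_t k2) { return (uint16_t)(k1 | k2) == 0; }
  static bool kortest_cf(uint16_t k1, uint16_t k2) { return (uint16_t)(k1 | k2) == 0xFFFF; }

In other words, an all-ones check falls directly out of KORTEST's CF, whereas PTEST's CF only says whether the bits of src are covered by dst.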

Modified:
    llvm/trunk/test/CodeGen/X86/ptest.ll

Modified: llvm/trunk/test/CodeGen/X86/ptest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ptest.ll?rev=324129&r1=324128&r2=324129&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ptest.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ptest.ll Fri Feb  2 12:12:44 2018
@@ -240,272 +240,3 @@ define i32 @vecsel512(<16 x i32> %input,
   %t2 = select i1 %t1, i32 %a, i32 %b
   ret i32 %t2
 }
-
-define i32 @veccond128_ones(<4 x i32> %input) {
-; SSE41-LABEL: veccond128_ones:
-; SSE41:       # %bb.0: # %entry
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pcmpeqb %xmm0, %xmm1
-; SSE41-NEXT:    pmovmskb %xmm1, %eax
-; SSE41-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; SSE41-NEXT:    je .LBB9_2
-; SSE41-NEXT:  # %bb.1: # %if-true-block
-; SSE41-NEXT:    xorl %eax, %eax
-; SSE41-NEXT:    retq
-; SSE41-NEXT:  .LBB9_2: # %endif-block
-; SSE41-NEXT:    movl $1, %eax
-; SSE41-NEXT:    retq
-;
-; AVX-LABEL: veccond128_ones:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpmovmskb %xmm0, %eax
-; AVX-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; AVX-NEXT:    je .LBB9_2
-; AVX-NEXT:  # %bb.1: # %if-true-block
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    retq
-; AVX-NEXT:  .LBB9_2: # %endif-block
-; AVX-NEXT:    movl $1, %eax
-; AVX-NEXT:    retq
-entry:
-  %0 = bitcast <4 x i32> %input to i128
-  %1 = icmp ne i128 %0, -1
-  br i1 %1, label %if-true-block, label %endif-block
-if-true-block:
-  ret i32 0
-endif-block:
-  ret i32 1
-}
-
-define i32 @veccond256_ones(<8 x i32> %input) {
-; SSE41-LABEL: veccond256_ones:
-; SSE41:       # %bb.0: # %entry
-; SSE41-NEXT:    movq %xmm1, %rax
-; SSE41-NEXT:    movq %xmm0, %rcx
-; SSE41-NEXT:    pextrq $1, %xmm1, %rdx
-; SSE41-NEXT:    pextrq $1, %xmm0, %rsi
-; SSE41-NEXT:    andq %rdx, %rsi
-; SSE41-NEXT:    andq %rax, %rsi
-; SSE41-NEXT:    andq %rcx, %rsi
-; SSE41-NEXT:    cmpq $-1, %rsi
-; SSE41-NEXT:    je .LBB10_2
-; SSE41-NEXT:  # %bb.1: # %if-true-block
-; SSE41-NEXT:    xorl %eax, %eax
-; SSE41-NEXT:    retq
-; SSE41-NEXT:  .LBB10_2: # %endif-block
-; SSE41-NEXT:    movl $1, %eax
-; SSE41-NEXT:    retq
-;
-; AVX-LABEL: veccond256_ones:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovq %xmm1, %rax
-; AVX-NEXT:    vmovq %xmm0, %rcx
-; AVX-NEXT:    vpextrq $1, %xmm1, %rdx
-; AVX-NEXT:    vpextrq $1, %xmm0, %rsi
-; AVX-NEXT:    andq %rdx, %rsi
-; AVX-NEXT:    andq %rax, %rsi
-; AVX-NEXT:    andq %rcx, %rsi
-; AVX-NEXT:    cmpq $-1, %rsi
-; AVX-NEXT:    je .LBB10_2
-; AVX-NEXT:  # %bb.1: # %if-true-block
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
-; AVX-NEXT:  .LBB10_2: # %endif-block
-; AVX-NEXT:    movl $1, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
-entry:
-  %0 = bitcast <8 x i32> %input to i256
-  %1 = icmp ne i256 %0, -1
-  br i1 %1, label %if-true-block, label %endif-block
-if-true-block:
-  ret i32 0
-endif-block:
-  ret i32 1
-}
-
-define i32 @veccond512_ones(<16 x i32> %input) {
-; SSE41-LABEL: veccond512_ones:
-; SSE41:       # %bb.0: # %entry
-; SSE41-NEXT:    movq %xmm2, %rax
-; SSE41-NEXT:    movq %xmm0, %r8
-; SSE41-NEXT:    movq %xmm3, %rdx
-; SSE41-NEXT:    movq %xmm1, %rsi
-; SSE41-NEXT:    andq %rdx, %rsi
-; SSE41-NEXT:    andq %rax, %rsi
-; SSE41-NEXT:    pextrq $1, %xmm2, %rax
-; SSE41-NEXT:    pextrq $1, %xmm0, %rdx
-; SSE41-NEXT:    pextrq $1, %xmm3, %rdi
-; SSE41-NEXT:    pextrq $1, %xmm1, %rcx
-; SSE41-NEXT:    andq %rdi, %rcx
-; SSE41-NEXT:    andq %rax, %rcx
-; SSE41-NEXT:    andq %rdx, %rcx
-; SSE41-NEXT:    andq %rsi, %rcx
-; SSE41-NEXT:    andq %r8, %rcx
-; SSE41-NEXT:    cmpq $-1, %rcx
-; SSE41-NEXT:    je .LBB11_2
-; SSE41-NEXT:  # %bb.1: # %if-true-block
-; SSE41-NEXT:    xorl %eax, %eax
-; SSE41-NEXT:    retq
-; SSE41-NEXT:  .LBB11_2: # %endif-block
-; SSE41-NEXT:    movl $1, %eax
-; SSE41-NEXT:    retq
-;
-; AVX-LABEL: veccond512_ones:
-; AVX:       # %bb.0: # %entry
-; AVX-NEXT:    vmovq %xmm1, %rax
-; AVX-NEXT:    vmovq %xmm0, %r8
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovq %xmm2, %rdx
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX-NEXT:    vmovq %xmm3, %rsi
-; AVX-NEXT:    andq %rdx, %rsi
-; AVX-NEXT:    andq %rax, %rsi
-; AVX-NEXT:    vpextrq $1, %xmm1, %rax
-; AVX-NEXT:    vpextrq $1, %xmm0, %rdx
-; AVX-NEXT:    vpextrq $1, %xmm2, %rdi
-; AVX-NEXT:    vpextrq $1, %xmm3, %rcx
-; AVX-NEXT:    andq %rdi, %rcx
-; AVX-NEXT:    andq %rax, %rcx
-; AVX-NEXT:    andq %rdx, %rcx
-; AVX-NEXT:    andq %rsi, %rcx
-; AVX-NEXT:    andq %r8, %rcx
-; AVX-NEXT:    cmpq $-1, %rcx
-; AVX-NEXT:    je .LBB11_2
-; AVX-NEXT:  # %bb.1: # %if-true-block
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
-; AVX-NEXT:  .LBB11_2: # %endif-block
-; AVX-NEXT:    movl $1, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
-entry:
-  %0 = bitcast <16 x i32> %input to i512
-  %1 = icmp ne i512 %0, -1
-  br i1 %1, label %if-true-block, label %endif-block
-if-true-block:
-  ret i32 0
-endif-block:
-  ret i32 1
-}
-
-define i32 @vecsel128_ones(<4 x i32> %input, i32 %a, i32 %b) {
-; SSE41-LABEL: vecsel128_ones:
-; SSE41:       # %bb.0:
-; SSE41-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE41-NEXT:    pcmpeqb %xmm0, %xmm1
-; SSE41-NEXT:    pmovmskb %xmm1, %eax
-; SSE41-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; SSE41-NEXT:    cmovel %esi, %edi
-; SSE41-NEXT:    movl %edi, %eax
-; SSE41-NEXT:    retq
-;
-; AVX-LABEL: vecsel128_ones:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpmovmskb %xmm0, %eax
-; AVX-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; AVX-NEXT:    cmovel %esi, %edi
-; AVX-NEXT:    movl %edi, %eax
-; AVX-NEXT:    retq
-  %t0 = bitcast <4 x i32> %input to i128
-  %t1 = icmp ne i128 %t0, -1
-  %t2 = select i1 %t1, i32 %a, i32 %b
-  ret i32 %t2
-}
-
-define i32 @vecsel256_ones(<8 x i32> %input, i32 %a, i32 %b) {
-; SSE41-LABEL: vecsel256_ones:
-; SSE41:       # %bb.0:
-; SSE41-NEXT:    movq %xmm1, %rax
-; SSE41-NEXT:    movq %xmm0, %r8
-; SSE41-NEXT:    pextrq $1, %xmm1, %rdx
-; SSE41-NEXT:    pextrq $1, %xmm0, %rcx
-; SSE41-NEXT:    andq %rdx, %rcx
-; SSE41-NEXT:    andq %rax, %rcx
-; SSE41-NEXT:    andq %r8, %rcx
-; SSE41-NEXT:    cmpq $-1, %rcx
-; SSE41-NEXT:    cmovel %esi, %edi
-; SSE41-NEXT:    movl %edi, %eax
-; SSE41-NEXT:    retq
-;
-; AVX-LABEL: vecsel256_ones:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovq %xmm1, %r8
-; AVX-NEXT:    vmovq %xmm0, %rcx
-; AVX-NEXT:    vpextrq $1, %xmm1, %rdx
-; AVX-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX-NEXT:    andq %rdx, %rax
-; AVX-NEXT:    andq %r8, %rax
-; AVX-NEXT:    andq %rcx, %rax
-; AVX-NEXT:    cmpq $-1, %rax
-; AVX-NEXT:    cmovel %esi, %edi
-; AVX-NEXT:    movl %edi, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
-  %t0 = bitcast <8 x i32> %input to i256
-  %t1 = icmp ne i256 %t0, -1
-  %t2 = select i1 %t1, i32 %a, i32 %b
-  ret i32 %t2
-}
-
-define i32 @vecsel512_ones(<16 x i32> %input, i32 %a, i32 %b) {
-; SSE41-LABEL: vecsel512_ones:
-; SSE41:       # %bb.0:
-; SSE41-NEXT:    movq %xmm2, %rax
-; SSE41-NEXT:    movq %xmm0, %r8
-; SSE41-NEXT:    movq %xmm3, %rdx
-; SSE41-NEXT:    movq %xmm1, %rcx
-; SSE41-NEXT:    andq %rdx, %rcx
-; SSE41-NEXT:    andq %rax, %rcx
-; SSE41-NEXT:    pextrq $1, %xmm2, %r10
-; SSE41-NEXT:    pextrq $1, %xmm0, %r9
-; SSE41-NEXT:    pextrq $1, %xmm3, %rdx
-; SSE41-NEXT:    pextrq $1, %xmm1, %rax
-; SSE41-NEXT:    andq %rdx, %rax
-; SSE41-NEXT:    andq %r10, %rax
-; SSE41-NEXT:    andq %r9, %rax
-; SSE41-NEXT:    andq %rcx, %rax
-; SSE41-NEXT:    andq %r8, %rax
-; SSE41-NEXT:    cmpq $-1, %rax
-; SSE41-NEXT:    cmovel %esi, %edi
-; SSE41-NEXT:    movl %edi, %eax
-; SSE41-NEXT:    retq
-;
-; AVX-LABEL: vecsel512_ones:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovq %xmm1, %rax
-; AVX-NEXT:    vmovq %xmm0, %r8
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovq %xmm2, %rdx
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX-NEXT:    vmovq %xmm3, %rcx
-; AVX-NEXT:    andq %rdx, %rcx
-; AVX-NEXT:    andq %rax, %rcx
-; AVX-NEXT:    vpextrq $1, %xmm1, %r10
-; AVX-NEXT:    vpextrq $1, %xmm0, %r9
-; AVX-NEXT:    vpextrq $1, %xmm2, %rdx
-; AVX-NEXT:    vpextrq $1, %xmm3, %rax
-; AVX-NEXT:    andq %rdx, %rax
-; AVX-NEXT:    andq %r10, %rax
-; AVX-NEXT:    andq %r9, %rax
-; AVX-NEXT:    andq %rcx, %rax
-; AVX-NEXT:    andq %r8, %rax
-; AVX-NEXT:    cmpq $-1, %rax
-; AVX-NEXT:    cmovel %esi, %edi
-; AVX-NEXT:    movl %edi, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
-  %t0 = bitcast <16 x i32> %input to i512
-  %t1 = icmp ne i512 %t0, -1
-  %t2 = select i1 %t1, i32 %a, i32 %b
-  ret i32 %t2
-}
-