[llvm] 8b92544 - [X86][SSE] Simplify PTEST/TESTP tests for D76984

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 31 10:02:46 PDT 2020


Author: Simon Pilgrim
Date: 2020-03-31T18:02:27+01:00
New Revision: 8b925440d11b64108935f4fceb30d372caa68211

URL: https://github.com/llvm/llvm-project/commit/8b925440d11b64108935f4fceb30d372caa68211
DIFF: https://github.com/llvm/llvm-project/commit/8b925440d11b64108935f4fceb30d372caa68211.diff

LOG: [X86][SSE] Simplify PTEST/TESTP tests for D76984

We don't need to use an all-ones value for the second operand - test the general case.
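
For reference, PTEST/VTESTP* set ZF when (SRC1 & SRC2) is all zeros and CF when
(~SRC1 & SRC2) is all zeros, which is why the folds below hold for an arbitrary
second operand Y, not just all-ones. A minimal C sketch of the testz/testc
identity using SSE4.1 intrinsics (the helper name is illustrative, not from the
patch):

#include <smmintrin.h>  /* SSE4.1: _mm_testz_si128 / _mm_testc_si128 */

/* testz(~X, Y) computes ((~X & Y) == 0), which is exactly what
   testc(X, Y) returns - so the xor can be folded away entirely. */
int testz_of_not(__m128i x, __m128i y) {
  __m128i ones = _mm_set1_epi32(-1);      /* all-ones vector */
  __m128i notx = _mm_xor_si128(x, ones);  /* ~X */
  return _mm_testz_si128(notx, y);        /* == _mm_testc_si128(x, y) */
}

The testc(~X,Y) -> testz(X,Y) and testnzc(~X,Y) -> testnzc(X,Y) cases in the
tests below follow from the same ZF/CF symmetry.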

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/combine-ptest.ll
    llvm/test/CodeGen/X86/combine-testpd.ll
    llvm/test/CodeGen/X86/combine-testps.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/combine-ptest.ll b/llvm/test/CodeGen/X86/combine-ptest.ll
index 6a4176a34d1b..3a888aaa03e8 100644
--- a/llvm/test/CodeGen/X86/combine-ptest.ll
+++ b/llvm/test/CodeGen/X86/combine-ptest.ll
@@ -2,114 +2,76 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
 
 ;
-; testz(~X,-1) -> testc(X,-1)
+; testz(~X,Y) -> testc(X,Y)
 ;
 
-define i32 @ptestz_128_invert(<2 x i64> %c, i32 %a, i32 %b) {
+define i32 @ptestz_128_invert(<2 x i64> %c, <2 x i64> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: ptestz_128_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    vptest %xmm1, %xmm0
 ; CHECK-NEXT:    cmovnel %esi, %eax
 ; CHECK-NEXT:    retq
   %t1 = xor <2 x i64> %c, <i64 -1, i64 -1>
-  %t2 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t1, <2 x i64> <i64 -1, i64 -1>)
+  %t2 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t1, <2 x i64> %d)
   %t3 = icmp ne i32 %t2, 0
   %t4 = select i1 %t3, i32 %a, i32 %b
   ret i32 %t4
 }
 
-define i32 @ptestz_256_invert(<4 x i64> %c, i32 %a, i32 %b) {
+define i32 @ptestz_256_invert(<4 x i64> %c, <4 x i64> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: ptestz_256_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    vptest %ymm1, %ymm0
 ; CHECK-NEXT:    cmovnel %esi, %eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %t1 = xor <4 x i64> %c, <i64 -1, i64 -1, i64 -1, i64 -1>
-  %t2 = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %t1, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>)
+  %t2 = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %t1, <4 x i64> %d)
   %t3 = icmp ne i32 %t2, 0
   %t4 = select i1 %t3, i32 %a, i32 %b
   ret i32 %t4
 }
 
 ;
-; testc(~X,-1) -> testz(X,-1)
+; testc(~X,Y) -> testz(X,Y)
 ;
 
-define i32 @ptestc_128_invert(<2 x i64> %c, i32 %a, i32 %b) {
+define i32 @ptestc_128_invert(<2 x i64> %c, <2 x i64> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: ptestc_128_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    vptest %xmm1, %xmm0
 ; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    retq
   %t1 = xor <2 x i64> %c, <i64 -1, i64 -1>
-  %t2 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> <i64 -1, i64 -1>)
+  %t2 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> %d)
   %t3 = icmp ne i32 %t2, 0
   %t4 = select i1 %t3, i32 %a, i32 %b
   ret i32 %t4
 }
 
-define i32 @ptestc_256_invert(<4 x i64> %c, i32 %a, i32 %b) {
+define i32 @ptestc_256_invert(<4 x i64> %c, <4 x i64> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: ptestc_256_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    vptest %ymm1, %ymm0
 ; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %t1 = xor <4 x i64> %c, <i64 -1, i64 -1, i64 -1, i64 -1>
-  %t2 = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %t1, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>)
-  %t3 = icmp ne i32 %t2, 0
-  %t4 = select i1 %t3, i32 %a, i32 %b
-  ret i32 %t4
-}
-
-;
-; testnzc(~X,-1) -> testnzc(X,-1)
-;
-
-define i32 @ptestnzc_128_invert(<2 x i64> %c, i32 %a, i32 %b) {
-; CHECK-LABEL: ptestnzc_128_invert:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vptest %xmm1, %xmm0
-; CHECK-NEXT:    cmovael %esi, %eax
-; CHECK-NEXT:    retq
-  %t1 = xor <2 x i64> %c, <i64 -1, i64 -1>
-  %t2 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> <i64 -1, i64 -1>)
-  %t3 = icmp ne i32 %t2, 0
-  %t4 = select i1 %t3, i32 %a, i32 %b
-  ret i32 %t4
-}
-
-define i32 @ptestnzc_256_invert(<4 x i64> %c, i32 %a, i32 %b) {
-; CHECK-LABEL: ptestnzc_256_invert:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vxorps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vptest %ymm1, %ymm0
-; CHECK-NEXT:    cmovbel %esi, %eax
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
-  %t1 = xor <4 x i64> %c, <i64 -1, i64 -1, i64 -1, i64 -1>
-  %t2 = call i32 @llvm.x86.avx.ptestnzc.256(<4 x i64> %t1, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>)
+  %t2 = call i32 @llvm.x86.avx.ptestc.256(<4 x i64> %t1, <4 x i64> %d)
   %t3 = icmp ne i32 %t2, 0
   %t4 = select i1 %t3, i32 %a, i32 %b
   ret i32 %t4
@@ -119,8 +81,8 @@ define i32 @ptestnzc_256_invert(<4 x i64> %c, i32 %a, i32 %b) {
 ; testnzc(~X,Y) -> testnzc(X,Y)
 ;
 
-define i32 @ptestnzc_128_flip(<2 x i64> %c, <2 x i64> %d, i32 %a, i32 %b) {
-; CHECK-LABEL: ptestnzc_128_flip:
+define i32 @ptestnzc_128_invert(<2 x i64> %c, <2 x i64> %d, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestnzc_128_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -135,8 +97,8 @@ define i32 @ptestnzc_128_flip(<2 x i64> %c, <2 x i64> %d, i32 %a, i32 %b) {
   ret i32 %t4
 }
 
-define i32 @ptestnzc_256_flip(<4 x i64> %c, <4 x i64> %d, i32 %a, i32 %b) {
-; CHECK-LABEL: ptestnzc_256_flip:
+define i32 @ptestnzc_256_invert(<4 x i64> %c, <4 x i64> %d, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestnzc_256_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2

diff --git a/llvm/test/CodeGen/X86/combine-testpd.ll b/llvm/test/CodeGen/X86/combine-testpd.ll
index b19881fd5066..eaf4eb6bd598 100644
--- a/llvm/test/CodeGen/X86/combine-testpd.ll
+++ b/llvm/test/CodeGen/X86/combine-testpd.ll
@@ -2,34 +2,34 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
 
 ;
-; testz(~X,-1) -> testc(X,-1)
+; testz(~X,Y) -> testc(X,Y)
 ;
 
-define i32 @testpdz_128_invert(<2 x double> %c, i32 %a, i32 %b) {
+define i32 @testpdz_128_invert(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: testpdz_128_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    vtestpd %xmm1, %xmm0
 ; CHECK-NEXT:    cmovnel %esi, %eax
 ; CHECK-NEXT:    retq
   %t0 = bitcast <2 x double> %c to <2 x i64>
   %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
   %t2 = bitcast <2 x i64> %t1 to <2 x double>
-  %t3 = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %t2, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+  %t3 = call i32 @llvm.x86.avx.vtestz.pd(<2 x double> %t2, <2 x double> %d)
   %t4 = icmp ne i32 %t3, 0
   %t5 = select i1 %t4, i32 %a, i32 %b
   ret i32 %t5
 }
 
-define i32 @testpdz_256_invert(<4 x double> %c, i32 %a, i32 %b) {
+define i32 @testpdz_256_invert(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: testpdz_256_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    vtestpd %ymm1, %ymm0
 ; CHECK-NEXT:    cmovnel %esi, %eax
 ; CHECK-NEXT:    vzeroupper
@@ -37,41 +37,41 @@ define i32 @testpdz_256_invert(<4 x double> %c, i32 %a, i32 %b) {
   %t0 = bitcast <4 x double> %c to <4 x i64>
   %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
   %t2 = bitcast <4 x i64> %t1 to <4 x double>
-  %t3 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %t2, <4 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+  %t3 = call i32 @llvm.x86.avx.vtestz.pd.256(<4 x double> %t2, <4 x double> %d)
   %t4 = icmp ne i32 %t3, 0
   %t5 = select i1 %t4, i32 %a, i32 %b
   ret i32 %t5
 }
 
 ;
-; testc(~X,-1) -> testz(X,-1)
+; testc(~X,Y) -> testz(X,Y)
 ;
 
-define i32 @testpdc_128_invert(<2 x double> %c, i32 %a, i32 %b) {
+define i32 @testpdc_128_invert(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: testpdc_128_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    vtestpd %xmm1, %xmm0
 ; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    retq
   %t0 = bitcast <2 x double> %c to <2 x i64>
   %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
   %t2 = bitcast <2 x i64> %t1 to <2 x double>
-  %t3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %t2, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+  %t3 = call i32 @llvm.x86.avx.vtestc.pd(<2 x double> %t2, <2 x double> %d)
   %t4 = icmp ne i32 %t3, 0
   %t5 = select i1 %t4, i32 %a, i32 %b
   ret i32 %t5
 }
 
-define i32 @testpdc_256_invert(<4 x double> %c, i32 %a, i32 %b) {
+define i32 @testpdc_256_invert(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: testpdc_256_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    vtestpd %ymm1, %ymm0
 ; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    vzeroupper
@@ -79,49 +79,7 @@ define i32 @testpdc_256_invert(<4 x double> %c, i32 %a, i32 %b) {
   %t0 = bitcast <4 x double> %c to <4 x i64>
   %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
   %t2 = bitcast <4 x i64> %t1 to <4 x double>
-  %t3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %t2, <4 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
-  %t4 = icmp ne i32 %t3, 0
-  %t5 = select i1 %t4, i32 %a, i32 %b
-  ret i32 %t5
-}
-
-;
-; testnzc(~X,-1) -> testnzc(X,-1)
-;
-
-define i32 @testpdnzc_128_invert(<2 x double> %c, i32 %a, i32 %b) {
-; CHECK-LABEL: testpdnzc_128_invert:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vtestpd %xmm1, %xmm0
-; CHECK-NEXT:    cmovbel %esi, %eax
-; CHECK-NEXT:    retq
-  %t0 = bitcast <2 x double> %c to <2 x i64>
-  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
-  %t2 = bitcast <2 x i64> %t1 to <2 x double>
-  %t3 = call i32 @llvm.x86.avx.vtestnzc.pd(<2 x double> %t2, <2 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
-  %t4 = icmp ne i32 %t3, 0
-  %t5 = select i1 %t4, i32 %a, i32 %b
-  ret i32 %t5
-}
-
-define i32 @testpdnzc_256_invert(<4 x double> %c, i32 %a, i32 %b) {
-; CHECK-LABEL: testpdnzc_256_invert:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vxorps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vtestpd %ymm1, %ymm0
-; CHECK-NEXT:    cmovbel %esi, %eax
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
-  %t0 = bitcast <4 x double> %c to <4 x i64>
-  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
-  %t2 = bitcast <4 x i64> %t1 to <4 x double>
-  %t3 = call i32 @llvm.x86.avx.vtestnzc.pd.256(<4 x double> %t2, <4 x double> <double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF, double 0xFFFFFFFFFFFFFFFF>)
+  %t3 = call i32 @llvm.x86.avx.vtestc.pd.256(<4 x double> %t2, <4 x double> %d)
   %t4 = icmp ne i32 %t3, 0
   %t5 = select i1 %t4, i32 %a, i32 %b
   ret i32 %t5
@@ -131,8 +89,8 @@ define i32 @testpdnzc_256_invert(<4 x double> %c, i32 %a, i32 %b) {
 ; testnzc(~X,Y) -> testnzc(X,Y)
 ;
 
-define i32 @testpdnzc_128_flip(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
-; CHECK-LABEL: testpdnzc_128_flip:
+define i32 @testpdnzc_128_invert(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b) {
+; CHECK-LABEL: testpdnzc_128_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -149,8 +107,8 @@ define i32 @testpdnzc_128_flip(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b)
   ret i32 %t5
 }
 
-define i32 @testpdnzc_256_flip(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
-; CHECK-LABEL: testpdnzc_256_flip:
+define i32 @testpdnzc_256_invert(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b) {
+; CHECK-LABEL: testpdnzc_256_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2

diff --git a/llvm/test/CodeGen/X86/combine-testps.ll b/llvm/test/CodeGen/X86/combine-testps.ll
index 06bc0936f998..85537ef0b76d 100644
--- a/llvm/test/CodeGen/X86/combine-testps.ll
+++ b/llvm/test/CodeGen/X86/combine-testps.ll
@@ -2,34 +2,34 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
 
 ;
-; testz(~X,-1) -> testc(X,-1)
+; testz(~X,Y) -> testc(X,Y)
 ;
 
-define i32 @testpsz_128_invert(<4 x float> %c, i32 %a, i32 %b) {
+define i32 @testpsz_128_invert(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: testpsz_128_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    vtestps %xmm1, %xmm0
 ; CHECK-NEXT:    cmovnel %esi, %eax
 ; CHECK-NEXT:    retq
   %t0 = bitcast <4 x float> %c to <2 x i64>
   %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
   %t2 = bitcast <2 x i64> %t1 to <4 x float>
-  %t3 = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> %t2, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+  %t3 = call i32 @llvm.x86.avx.vtestz.ps(<4 x float> %t2, <4 x float> %d)
   %t4 = icmp ne i32 %t3, 0
   %t5 = select i1 %t4, i32 %a, i32 %b
   ret i32 %t5
 }
 
-define i32 @testpsz_256_invert(<8 x float> %c, i32 %a, i32 %b) {
+define i32 @testpsz_256_invert(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: testpsz_256_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    vtestps %ymm1, %ymm0
 ; CHECK-NEXT:    cmovnel %esi, %eax
 ; CHECK-NEXT:    vzeroupper
@@ -37,41 +37,41 @@ define i32 @testpsz_256_invert(<8 x float> %c, i32 %a, i32 %b) {
   %t0 = bitcast <8 x float> %c to <4 x i64>
   %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
   %t2 = bitcast <4 x i64> %t1 to <8 x float>
-  %t3 = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %t2, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+  %t3 = call i32 @llvm.x86.avx.vtestz.ps.256(<8 x float> %t2, <8 x float> %d)
   %t4 = icmp ne i32 %t3, 0
   %t5 = select i1 %t4, i32 %a, i32 %b
   ret i32 %t5
 }
 
 ;
-; testc(~X,-1) -> testz(X,-1)
+; testc(~X,Y) -> testz(X,Y)
 ;
 
-define i32 @testpsc_128_invert(<4 x float> %c, i32 %a, i32 %b) {
+define i32 @testpsc_128_invert(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: testpsc_128_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; CHECK-NEXT:    vtestps %xmm1, %xmm0
 ; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    retq
   %t0 = bitcast <4 x float> %c to <2 x i64>
   %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
   %t2 = bitcast <2 x i64> %t1 to <4 x float>
-  %t3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %t2, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+  %t3 = call i32 @llvm.x86.avx.vtestc.ps(<4 x float> %t2, <4 x float> %d)
   %t4 = icmp ne i32 %t3, 0
   %t5 = select i1 %t4, i32 %a, i32 %b
   ret i32 %t5
 }
 
-define i32 @testpsc_256_invert(<8 x float> %c, i32 %a, i32 %b) {
+define i32 @testpsc_256_invert(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: testpsc_256_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vxorps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vxorps %ymm2, %ymm0, %ymm0
 ; CHECK-NEXT:    vtestps %ymm1, %ymm0
 ; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    vzeroupper
@@ -79,49 +79,7 @@ define i32 @testpsc_256_invert(<8 x float> %c, i32 %a, i32 %b) {
   %t0 = bitcast <8 x float> %c to <4 x i64>
   %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
   %t2 = bitcast <4 x i64> %t1 to <8 x float>
-  %t3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %t2, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
-  %t4 = icmp ne i32 %t3, 0
-  %t5 = select i1 %t4, i32 %a, i32 %b
-  ret i32 %t5
-}
-
-;
-; testnzc(~X,-1) -> testnzc(X,-1)
-;
-
-define i32 @testpsnzc_128_invert(<4 x float> %c, i32 %a, i32 %b) {
-; CHECK-LABEL: testpsnzc_128_invert:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vtestps %xmm1, %xmm0
-; CHECK-NEXT:    cmovbel %esi, %eax
-; CHECK-NEXT:    retq
-  %t0 = bitcast <4 x float> %c to <2 x i64>
-  %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
-  %t2 = bitcast <2 x i64> %t1 to <4 x float>
-  %t3 = call i32 @llvm.x86.avx.vtestnzc.ps(<4 x float> %t2, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
-  %t4 = icmp ne i32 %t3, 0
-  %t5 = select i1 %t4, i32 %a, i32 %b
-  ret i32 %t5
-}
-
-define i32 @testpsnzc_256_invert(<8 x float> %c, i32 %a, i32 %b) {
-; CHECK-LABEL: testpsnzc_256_invert:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; CHECK-NEXT:    vcmptrueps %ymm1, %ymm1, %ymm1
-; CHECK-NEXT:    vxorps %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vtestps %ymm1, %ymm0
-; CHECK-NEXT:    cmovbel %esi, %eax
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
-  %t0 = bitcast <8 x float> %c to <4 x i64>
-  %t1 = xor <4 x i64> %t0, <i64 -1, i64 -1, i64 -1, i64 -1>
-  %t2 = bitcast <4 x i64> %t1 to <8 x float>
-  %t3 = call i32 @llvm.x86.avx.vtestnzc.ps.256(<8 x float> %t2, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>)
+  %t3 = call i32 @llvm.x86.avx.vtestc.ps.256(<8 x float> %t2, <8 x float> %d)
   %t4 = icmp ne i32 %t3, 0
   %t5 = select i1 %t4, i32 %a, i32 %b
   ret i32 %t5
@@ -131,8 +89,8 @@ define i32 @testpsnzc_256_invert(<8 x float> %c, i32 %a, i32 %b) {
 ; testnzc(~X,Y) -> testnzc(X,Y)
 ;
 
-define i32 @testpsnzc_128_flip(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b) {
-; CHECK-LABEL: testpsnzc_128_flip:
+define i32 @testpsnzc_128_invert(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b) {
+; CHECK-LABEL: testpsnzc_128_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
@@ -149,8 +107,8 @@ define i32 @testpsnzc_128_flip(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b) {
   ret i32 %t5
 }
 
-define i32 @testpsnzc_256_flip(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b) {
-; CHECK-LABEL: testpsnzc_256_flip:
+define i32 @testpsnzc_256_invert(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b) {
+; CHECK-LABEL: testpsnzc_256_invert:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2

