[llvm] 8340b41 - [X86] vector-reduce-and-cmp.ll - and reductions should compare against -1 not 0 for allof style comparisons
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Apr 1 05:58:29 PDT 2023
Author: Simon Pilgrim
Date: 2023-04-01T13:58:18+01:00
New Revision: 8340b4140daf3c4a04b88524a5eae6a662089439
URL: https://github.com/llvm/llvm-project/commit/8340b4140daf3c4a04b88524a5eae6a662089439
DIFF: https://github.com/llvm/llvm-project/commit/8340b4140daf3c4a04b88524a5eae6a662089439.diff
LOG: [X86] vector-reduce-and-cmp.ll - and reductions should compare against -1 not 0 for allof style comparisons
Fix these tests so they actually test what was intended and properly complement vector-reduce-or-cmp.ll. The comparisons against 0 were a copy+paste typo carried over from vector-reduce-or-cmp.ll (where comparing against 0 is correct); comparing a reduce_and result against 0 doesn't tell us much (only that the lanes have no common set bits).
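For illustration (not part of the commit): an allof-style test on an AND reduction only holds when every lane is all-ones, so the reduced scalar must be compared against -1. A minimal IR sketch of the corrected pattern, using a hypothetical function name:

  define i1 @allof_v4i32(<4 x i32> %mask) {
    ; reduce_and yields -1 iff every lane is all-ones (allof);
    ; comparing against 0 would only detect no common set bits.
    %r = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %mask)
    %allof = icmp eq i32 %r, -1
    ret i1 %allof
  }
  declare i32 @llvm.vector.reduce.and.v4i32(<4 x i32>)

On x86 this lowers to lane-wise pand/vpand reduction steps followed by a scalar cmp{b,w,l,q} $-1 and sete/setne, which is what the updated CHECK lines below now expect.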
Added:
Modified:
llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
index 1406069bde65..5770c29b132d 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
@@ -16,7 +16,7 @@ define i1 @test_v2i64(<2 x i64> %a0) {
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: testq %rax, %rax
+; SSE-NEXT: cmpq $-1, %rax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -25,11 +25,11 @@ define i1 @test_v2i64(<2 x i64> %a0) {
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: testq %rax, %rax
+; AVX-NEXT: cmpq $-1, %rax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i64 @llvm.vector.reduce.and.v2i64(<2 x i64> %a0)
- %2 = icmp eq i64 %1, 0
+ %2 = icmp eq i64 %1, -1
ret i1 %2
}
@@ -40,7 +40,7 @@ define i1 @test_v4i64(<4 x i64> %a0) {
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: testq %rax, %rax
+; SSE-NEXT: cmpq $-1, %rax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -51,7 +51,7 @@ define i1 @test_v4i64(<4 x i64> %a0) {
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: testq %rax, %rax
+; AVX1-NEXT: cmpq $-1, %rax
; AVX1-NEXT: setne %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -63,7 +63,7 @@ define i1 @test_v4i64(<4 x i64> %a0) {
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: testq %rax, %rax
+; AVX2-NEXT: cmpq $-1, %rax
; AVX2-NEXT: setne %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -75,12 +75,12 @@ define i1 @test_v4i64(<4 x i64> %a0) {
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: testq %rax, %rax
+; AVX512-NEXT: cmpq $-1, %rax
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> %a0)
- %2 = icmp ne i64 %1, 0
+ %2 = icmp ne i64 %1, -1
ret i1 %2
}
@@ -93,7 +93,7 @@ define i1 @test_v8i64(<8 x i64> %a0) {
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movq %xmm1, %rax
-; SSE-NEXT: testq %rax, %rax
+; SSE-NEXT: cmpq $-1, %rax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -105,7 +105,7 @@ define i1 @test_v8i64(<8 x i64> %a0) {
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: testq %rax, %rax
+; AVX1-NEXT: cmpq $-1, %rax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -118,7 +118,7 @@ define i1 @test_v8i64(<8 x i64> %a0) {
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: testq %rax, %rax
+; AVX2-NEXT: cmpq $-1, %rax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -132,12 +132,12 @@ define i1 @test_v8i64(<8 x i64> %a0) {
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: testq %rax, %rax
+; AVX512-NEXT: cmpq $-1, %rax
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i64 @llvm.vector.reduce.and.v8i64(<8 x i64> %a0)
- %2 = icmp eq i64 %1, 0
+ %2 = icmp eq i64 %1, -1
ret i1 %2
}
@@ -154,7 +154,7 @@ define i1 @test_v16i64(<16 x i64> %a0) {
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movq %xmm0, %rax
-; SSE-NEXT: testq %rax, %rax
+; SSE-NEXT: cmpq $-1, %rax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -168,7 +168,7 @@ define i1 @test_v16i64(<16 x i64> %a0) {
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
-; AVX1-NEXT: testq %rax, %rax
+; AVX1-NEXT: cmpq $-1, %rax
; AVX1-NEXT: setne %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -183,7 +183,7 @@ define i1 @test_v16i64(<16 x i64> %a0) {
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: testq %rax, %rax
+; AVX2-NEXT: cmpq $-1, %rax
; AVX2-NEXT: setne %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -198,12 +198,12 @@ define i1 @test_v16i64(<16 x i64> %a0) {
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: testq %rax, %rax
+; AVX512-NEXT: cmpq $-1, %rax
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> %a0)
- %2 = icmp ne i64 %1, 0
+ %2 = icmp ne i64 %1, -1
ret i1 %2
}
@@ -217,7 +217,7 @@ define i1 @test_v2i32(<2 x i32> %a0) {
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpl $-1, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -226,11 +226,11 @@ define i1 @test_v2i32(<2 x i32> %a0) {
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: cmpl $-1, %eax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i32 @llvm.vector.reduce.and.v2i32(<2 x i32> %a0)
- %2 = icmp eq i32 %1, 0
+ %2 = icmp eq i32 %1, -1
ret i1 %2
}
@@ -242,7 +242,7 @@ define i1 @test_v4i32(<4 x i32> %a0) {
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpl $-1, %eax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -253,11 +253,11 @@ define i1 @test_v4i32(<4 x i32> %a0) {
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: cmpl $-1, %eax
; AVX-NEXT: setne %al
; AVX-NEXT: retq
%1 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a0)
- %2 = icmp ne i32 %1, 0
+ %2 = icmp ne i32 %1, -1
ret i1 %2
}
@@ -270,7 +270,7 @@ define i1 @test_v8i32(<8 x i32> %a0) {
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpl $-1, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -283,7 +283,7 @@ define i1 @test_v8i32(<8 x i32> %a0) {
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: cmpl $-1, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -297,7 +297,7 @@ define i1 @test_v8i32(<8 x i32> %a0) {
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: cmpl $-1, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -311,12 +311,12 @@ define i1 @test_v8i32(<8 x i32> %a0) {
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: cmpl $-1, %eax
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> %a0)
- %2 = icmp eq i32 %1, 0
+ %2 = icmp eq i32 %1, -1
ret i1 %2
}
@@ -331,7 +331,7 @@ define i1 @test_v16i32(<16 x i32> %a0) {
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpl $-1, %eax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -345,7 +345,7 @@ define i1 @test_v16i32(<16 x i32> %a0) {
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: cmpl $-1, %eax
; AVX1-NEXT: setne %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -360,7 +360,7 @@ define i1 @test_v16i32(<16 x i32> %a0) {
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: cmpl $-1, %eax
; AVX2-NEXT: setne %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -376,12 +376,12 @@ define i1 @test_v16i32(<16 x i32> %a0) {
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: cmpl $-1, %eax
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> %a0)
- %2 = icmp ne i32 %1, 0
+ %2 = icmp ne i32 %1, -1
ret i1 %2
}
@@ -400,7 +400,7 @@ define i1 @test_v32i32(<32 x i32> %a0) {
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpl $-1, %eax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -416,7 +416,7 @@ define i1 @test_v32i32(<32 x i32> %a0) {
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: cmpl $-1, %eax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -433,7 +433,7 @@ define i1 @test_v32i32(<32 x i32> %a0) {
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: cmpl $-1, %eax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -450,12 +450,12 @@ define i1 @test_v32i32(<32 x i32> %a0) {
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: cmpl $-1, %eax
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i32 @llvm.vector.reduce.and.v32i32(<32 x i32> %a0)
- %2 = icmp eq i32 %1, 0
+ %2 = icmp eq i32 %1, -1
ret i1 %2
}
@@ -470,7 +470,7 @@ define i1 @test_v2i16(<2 x i16> %a0) {
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -479,11 +479,11 @@ define i1 @test_v2i16(<2 x i16> %a0) {
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: cmpw $-1, %ax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v2i16(<2 x i16> %a0)
- %2 = icmp eq i16 %1, 0
+ %2 = icmp eq i16 %1, -1
ret i1 %2
}
@@ -496,7 +496,7 @@ define i1 @test_v4i16(<4 x i16> %a0) {
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -507,11 +507,11 @@ define i1 @test_v4i16(<4 x i16> %a0) {
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: cmpw $-1, %ax
; AVX-NEXT: setne %al
; AVX-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v4i16(<4 x i16> %a0)
- %2 = icmp ne i16 %1, 0
+ %2 = icmp ne i16 %1, -1
ret i1 %2
}
@@ -526,7 +526,7 @@ define i1 @test_v8i16(<8 x i16> %a0) {
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -539,11 +539,11 @@ define i1 @test_v8i16(<8 x i16> %a0) {
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: cmpw $-1, %ax
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %a0)
- %2 = icmp eq i16 %1, 0
+ %2 = icmp eq i16 %1, -1
ret i1 %2
}
@@ -559,7 +559,7 @@ define i1 @test_v16i16(<16 x i16> %a0) {
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -574,7 +574,7 @@ define i1 @test_v16i16(<16 x i16> %a0) {
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: setne %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -590,7 +590,7 @@ define i1 @test_v16i16(<16 x i16> %a0) {
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: cmpw $-1, %ax
; AVX2-NEXT: setne %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -606,12 +606,12 @@ define i1 @test_v16i16(<16 x i16> %a0) {
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: cmpw $-1, %ax
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %a0)
- %2 = icmp ne i16 %1, 0
+ %2 = icmp ne i16 %1, -1
ret i1 %2
}
@@ -629,7 +629,7 @@ define i1 @test_v32i16(<32 x i16> %a0) {
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -645,7 +645,7 @@ define i1 @test_v32i16(<32 x i16> %a0) {
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -662,7 +662,7 @@ define i1 @test_v32i16(<32 x i16> %a0) {
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: cmpw $-1, %ax
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -680,12 +680,12 @@ define i1 @test_v32i16(<32 x i16> %a0) {
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: cmpw $-1, %ax
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v32i16(<32 x i16> %a0)
- %2 = icmp eq i16 %1, 0
+ %2 = icmp eq i16 %1, -1
ret i1 %2
}
@@ -707,7 +707,7 @@ define i1 @test_v64i16(<64 x i16> %a0) {
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpw $-1, %ax
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -725,7 +725,7 @@ define i1 @test_v64i16(<64 x i16> %a0) {
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: cmpw $-1, %ax
; AVX1-NEXT: setne %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -744,7 +744,7 @@ define i1 @test_v64i16(<64 x i16> %a0) {
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: cmpw $-1, %ax
; AVX2-NEXT: setne %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -763,12 +763,12 @@ define i1 @test_v64i16(<64 x i16> %a0) {
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: cmpw $-1, %ax
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i16 @llvm.vector.reduce.and.v64i16(<64 x i16> %a0)
- %2 = icmp ne i16 %1, 0
+ %2 = icmp ne i16 %1, -1
ret i1 %2
}
@@ -783,7 +783,7 @@ define i1 @test_v2i8(<2 x i8> %a0) {
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: testb %al, %al
+; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -792,11 +792,11 @@ define i1 @test_v2i8(<2 x i8> %a0) {
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: testb %al, %al
+; AVX-NEXT: cmpb $-1, %al
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v2i8(<2 x i8> %a0)
- %2 = icmp eq i8 %1, 0
+ %2 = icmp eq i8 %1, -1
ret i1 %2
}
@@ -810,7 +810,7 @@ define i1 @test_v4i8(<4 x i8> %a0) {
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -821,11 +821,11 @@ define i1 @test_v4i8(<4 x i8> %a0) {
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: cmpb $-1, %al
; AVX-NEXT: setne %al
; AVX-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v4i8(<4 x i8> %a0)
- %2 = icmp ne i8 %1, 0
+ %2 = icmp ne i8 %1, -1
ret i1 %2
}
@@ -841,7 +841,7 @@ define i1 @test_v8i8(<8 x i8> %a0) {
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -854,11 +854,11 @@ define i1 @test_v8i8(<8 x i8> %a0) {
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: cmpb $-1, %al
; AVX-NEXT: sete %al
; AVX-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> %a0)
- %2 = icmp eq i8 %1, 0
+ %2 = icmp eq i8 %1, -1
ret i1 %2
}
@@ -876,7 +876,7 @@ define i1 @test_v16i8(<16 x i8> %a0) {
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -891,11 +891,11 @@ define i1 @test_v16i8(<16 x i8> %a0) {
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: cmpb $-1, %al
; AVX-NEXT: setne %al
; AVX-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %a0)
- %2 = icmp ne i8 %1, 0
+ %2 = icmp ne i8 %1, -1
ret i1 %2
}
@@ -914,7 +914,7 @@ define i1 @test_v32i8(<32 x i8> %a0) {
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -931,7 +931,7 @@ define i1 @test_v32i8(<32 x i8> %a0) {
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: cmpb $-1, %al
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -949,7 +949,7 @@ define i1 @test_v32i8(<32 x i8> %a0) {
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: cmpb $-1, %al
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -967,12 +967,12 @@ define i1 @test_v32i8(<32 x i8> %a0) {
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: testl %eax, %eax
+; AVX512-NEXT: cmpb $-1, %al
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %a0)
- %2 = icmp eq i8 %1, 0
+ %2 = icmp eq i8 %1, -1
ret i1 %2
}
@@ -993,7 +993,7 @@ define i1 @test_v64i8(<64 x i8> %a0) {
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: setne %al
; SSE-NEXT: retq
;
@@ -1011,7 +1011,7 @@ define i1 @test_v64i8(<64 x i8> %a0) {
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testl %eax, %eax
+; AVX1-NEXT: cmpb $-1, %al
; AVX1-NEXT: setne %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -1030,7 +1030,7 @@ define i1 @test_v64i8(<64 x i8> %a0) {
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testl %eax, %eax
+; AVX2-NEXT: cmpb $-1, %al
; AVX2-NEXT: setne %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -1050,12 +1050,12 @@ define i1 @test_v64i8(<64 x i8> %a0) {
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: testb %al, %al
+; AVX512-NEXT: cmpb $-1, %al
; AVX512-NEXT: setne %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v64i8(<64 x i8> %a0)
- %2 = icmp ne i8 %1, 0
+ %2 = icmp ne i8 %1, -1
ret i1 %2
}
@@ -1080,7 +1080,7 @@ define i1 @test_v128i8(<128 x i8> %a0) {
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: movd %xmm1, %eax
-; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmpb $-1, %al
; SSE-NEXT: sete %al
; SSE-NEXT: retq
;
@@ -1100,7 +1100,7 @@ define i1 @test_v128i8(<128 x i8> %a0) {
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: testb %al, %al
+; AVX1-NEXT: cmpb $-1, %al
; AVX1-NEXT: sete %al
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -1121,7 +1121,7 @@ define i1 @test_v128i8(<128 x i8> %a0) {
; AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %eax
-; AVX2-NEXT: testb %al, %al
+; AVX2-NEXT: cmpb $-1, %al
; AVX2-NEXT: sete %al
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -1142,12 +1142,12 @@ define i1 @test_v128i8(<128 x i8> %a0) {
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: testb %al, %al
+; AVX512-NEXT: cmpb $-1, %al
; AVX512-NEXT: sete %al
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call i8 @llvm.vector.reduce.and.v128i8(<128 x i8> %a0)
- %2 = icmp eq i8 %1, 0
+ %2 = icmp eq i8 %1, -1
ret i1 %2
}