[llvm] 9fecbd4 - [X86] vector-compare-any_of.ll - add AVX1OR2 check-prefix and make AVX a common check-prefix

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Sat Apr 1 13:53:20 PDT 2023


Author: Simon Pilgrim
Date: 2023-04-01T21:53:03+01:00
New Revision: 9fecbd4ae69e0d41dd94250b0224ce0814f92d9d

URL: https://github.com/llvm/llvm-project/commit/9fecbd4ae69e0d41dd94250b0224ce0814f92d9d
DIFF: https://github.com/llvm/llvm-project/commit/9fecbd4ae69e0d41dd94250b0224ce0814f92d9d.diff

LOG: [X86] vector-compare-any_of.ll - add AVX1OR2 check-prefix and make AVX a common check-prefix
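For readers unfamiliar with shared FileCheck prefixes, here is a minimal illustrative sketch (the RUN lines, function names and check lines below are made up for this note and are not part of the committed test): when several RUN lines pass a common prefix, update_llc_test_checks.py can emit a single block of checks under that prefix wherever the configurations produce identical code, and fall back to the more specific prefixes only where they differ. In the committed test, AVX is now common to the avx, avx2 and avx512 RUN lines, while AVX1OR2 groups just the avx and avx2 ones.

    ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1
    ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2

    ; Both configurations lower this to the same instruction, so one block
    ; under the common AVX prefix covers both RUN lines.
    ; AVX-LABEL: example_fadd:
    ; AVX:    vaddps %xmm1, %xmm0, %xmm0
    define <4 x float> @example_fadd(<4 x float> %a, <4 x float> %b) {
      %r = fadd <4 x float> %a, %b
      ret <4 x float> %r
    }

    ; 256-bit integer adds differ: AVX1 splits the add into 128-bit halves,
    ; AVX2 adds directly on ymm registers, so each keeps its own prefix.
    ; AVX1-LABEL: example_add_v8i32:
    ; AVX1:    vextractf128
    ; AVX1:    vinsertf128
    ; AVX2-LABEL: example_add_v8i32:
    ; AVX2:    vpaddd %ymm
    define <8 x i32> @example_add_v8i32(<8 x i32> %a, <8 x i32> %b) {
      %r = add <8 x i32> %a, %b
      ret <8 x i32> %r
    }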

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/vector-compare-any_of.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-compare-any_of.ll b/llvm/test/CodeGen/X86/vector-compare-any_of.ll
index 5281d5260176..67c0e937040b 100644
--- a/llvm/test/CodeGen/X86/vector-compare-any_of.ll
+++ b/llvm/test/CodeGen/X86/vector-compare-any_of.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2   | FileCheck %s --check-prefixes=SSE,SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=SSE,SSE42
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx    | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2   | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx    | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2   | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
 
 define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
 ; SSE-LABEL: test_v2f64_sext:
@@ -23,15 +23,6 @@ define i64 @test_v2f64_sext(<2 x double> %a0, <2 x double> %a1) {
 ; AVX-NEXT:    negl %ecx
 ; AVX-NEXT:    sbbq %rax, %rax
 ; AVX-NEXT:    retq
-;
-; AVX512-LABEL: test_v2f64_sext:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
-; AVX512-NEXT:    vmovmskpd %xmm0, %ecx
-; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    negl %ecx
-; AVX512-NEXT:    sbbq %rax, %rax
-; AVX512-NEXT:    retq
   %c = fcmp ogt <2 x double> %a0, %a1
   %s = sext <2 x i1> %c to <2 x i64>
   %1 = shufflevector <2 x i64> %s, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
@@ -61,16 +52,6 @@ define i64 @test_v4f64_sext(<4 x double> %a0, <4 x double> %a1) {
 ; AVX-NEXT:    sbbq %rax, %rax
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-;
-; AVX512-LABEL: test_v4f64_sext:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
-; AVX512-NEXT:    vmovmskpd %ymm0, %ecx
-; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    negl %ecx
-; AVX512-NEXT:    sbbq %rax, %rax
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
   %c = fcmp ogt <4 x double> %a0, %a1
   %s = sext <4 x i1> %c to <4 x i64>
   %1 = shufflevector <4 x i64> %s, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
@@ -93,17 +74,17 @@ define i64 @test_v4f64_legal_sext(<4 x double> %a0, <4 x double> %a1) {
 ; SSE-NEXT:    sbbq %rax, %rax
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: test_v4f64_legal_sext:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vmovmskps %xmm0, %ecx
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    sbbq %rax, %rax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: test_v4f64_legal_sext:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX1OR2-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1OR2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmovmskps %xmm0, %ecx
+; AVX1OR2-NEXT:    xorl %eax, %eax
+; AVX1OR2-NEXT:    negl %ecx
+; AVX1OR2-NEXT:    sbbq %rax, %rax
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v4f64_legal_sext:
 ; AVX512:       # %bb.0:
@@ -145,15 +126,6 @@ define i32 @test_v4f32_sext(<4 x float> %a0, <4 x float> %a1) {
 ; AVX-NEXT:    negl %ecx
 ; AVX-NEXT:    sbbl %eax, %eax
 ; AVX-NEXT:    retq
-;
-; AVX512-LABEL: test_v4f32_sext:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vcmpltps %xmm0, %xmm1, %xmm0
-; AVX512-NEXT:    vmovmskps %xmm0, %ecx
-; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    negl %ecx
-; AVX512-NEXT:    sbbl %eax, %eax
-; AVX512-NEXT:    retq
   %c = fcmp ogt <4 x float> %a0, %a1
   %s = sext <4 x i1> %c to <4 x i32>
   %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
@@ -185,16 +157,6 @@ define i32 @test_v8f32_sext(<8 x float> %a0, <8 x float> %a1) {
 ; AVX-NEXT:    sbbl %eax, %eax
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
-;
-; AVX512-LABEL: test_v8f32_sext:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
-; AVX512-NEXT:    vmovmskps %ymm0, %ecx
-; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    negl %ecx
-; AVX512-NEXT:    sbbl %eax, %eax
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
   %c = fcmp ogt <8 x float> %a0, %a1
   %s = sext <8 x i1> %c to <8 x i32>
   %1 = shufflevector <8 x i32> %s, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -219,17 +181,17 @@ define i32 @test_v8f32_legal_sext(<8 x float> %a0, <8 x float> %a1) {
 ; SSE-NEXT:    sbbl %eax, %eax
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: test_v8f32_legal_sext:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    negl %ecx
-; AVX-NEXT:    sbbl %eax, %eax
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: test_v8f32_legal_sext:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
+; AVX1OR2-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1OR2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vpmovmskb %xmm0, %ecx
+; AVX1OR2-NEXT:    xorl %eax, %eax
+; AVX1OR2-NEXT:    negl %ecx
+; AVX1OR2-NEXT:    sbbl %eax, %eax
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: test_v8f32_legal_sext:
 ; AVX512:       # %bb.0:
@@ -289,15 +251,6 @@ define i64 @test_v2i64_sext(<2 x i64> %a0, <2 x i64> %a1) {
 ; AVX-NEXT:    negl %ecx
 ; AVX-NEXT:    sbbq %rax, %rax
 ; AVX-NEXT:    retq
-;
-; AVX512-LABEL: test_v2i64_sext:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovmskpd %xmm0, %ecx
-; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    negl %ecx
-; AVX512-NEXT:    sbbq %rax, %rax
-; AVX512-NEXT:    retq
   %c = icmp sgt <2 x i64> %a0, %a1
   %s = sext <2 x i1> %c to <2 x i64>
   %1 = shufflevector <2 x i64> %s, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
@@ -491,15 +444,6 @@ define i32 @test_v4i32_sext(<4 x i32> %a0, <4 x i32> %a1) {
 ; AVX-NEXT:    negl %ecx
 ; AVX-NEXT:    sbbl %eax, %eax
 ; AVX-NEXT:    retq
-;
-; AVX512-LABEL: test_v4i32_sext:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vmovmskps %xmm0, %ecx
-; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    negl %ecx
-; AVX512-NEXT:    sbbl %eax, %eax
-; AVX512-NEXT:    retq
   %c = icmp sgt <4 x i32> %a0, %a1
   %s = sext <4 x i1> %c to <4 x i32>
   %1 = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
@@ -648,16 +592,6 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
 ; AVX-NEXT:    sbbl %eax, %eax
 ; AVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-NEXT:    retq
-;
-; AVX512-LABEL: test_v8i16_sext:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    negl %ecx
-; AVX512-NEXT:    sbbl %eax, %eax
-; AVX512-NEXT:    # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT:    retq
   %c = icmp sgt <8 x i16> %a0, %a1
   %s = sext <8 x i1> %c to <8 x i16>
   %1 = shufflevector <8 x i16> %s, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -818,16 +752,6 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
 ; AVX-NEXT:    sbbl %eax, %eax
 ; AVX-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-NEXT:    retq
-;
-; AVX512-LABEL: test_v16i8_sext:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vpmovmskb %xmm0, %ecx
-; AVX512-NEXT:    xorl %eax, %eax
-; AVX512-NEXT:    negl %ecx
-; AVX512-NEXT:    sbbl %eax, %eax
-; AVX512-NEXT:    # kill: def $al killed $al killed $eax
-; AVX512-NEXT:    retq
   %c = icmp sgt <16 x i8> %a0, %a1
   %s = sext <16 x i1> %c to <16 x i8>
   %1 = shufflevector <16 x i8> %s, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -916,13 +840,13 @@ define i1 @bool_reduction_v2f64(<2 x double> %x, <2 x double> %y) {
 ; SSE-NEXT:    setne %al
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: bool_reduction_v2f64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vmovmskpd %xmm0, %eax
-; AVX-NEXT:    testl %eax, %eax
-; AVX-NEXT:    setne %al
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: bool_reduction_v2f64:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vcmpltpd %xmm0, %xmm1, %xmm0
+; AVX1OR2-NEXT:    vmovmskpd %xmm0, %eax
+; AVX1OR2-NEXT:    testl %eax, %eax
+; AVX1OR2-NEXT:    setne %al
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: bool_reduction_v2f64:
 ; AVX512:       # %bb.0:
@@ -947,13 +871,13 @@ define i1 @bool_reduction_v4f32(<4 x float> %x, <4 x float> %y) {
 ; SSE-NEXT:    setne %al
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: bool_reduction_v4f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vmovmskps %xmm0, %eax
-; AVX-NEXT:    testl %eax, %eax
-; AVX-NEXT:    setne %al
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: bool_reduction_v4f32:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vmovmskps %xmm0, %eax
+; AVX1OR2-NEXT:    testl %eax, %eax
+; AVX1OR2-NEXT:    setne %al
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: bool_reduction_v4f32:
 ; AVX512:       # %bb.0:
@@ -982,14 +906,14 @@ define i1 @bool_reduction_v4f64(<4 x double> %x, <4 x double> %y) {
 ; SSE-NEXT:    setne %al
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: bool_reduction_v4f64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vcmplepd %ymm0, %ymm1, %ymm0
-; AVX-NEXT:    vmovmskpd %ymm0, %eax
-; AVX-NEXT:    testl %eax, %eax
-; AVX-NEXT:    setne %al
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: bool_reduction_v4f64:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vcmplepd %ymm0, %ymm1, %ymm0
+; AVX1OR2-NEXT:    vmovmskpd %ymm0, %eax
+; AVX1OR2-NEXT:    testl %eax, %eax
+; AVX1OR2-NEXT:    setne %al
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: bool_reduction_v4f64:
 ; AVX512:       # %bb.0:
@@ -1019,14 +943,14 @@ define i1 @bool_reduction_v8f32(<8 x float> %x, <8 x float> %y) {
 ; SSE-NEXT:    setne %al
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: bool_reduction_v8f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vcmpneqps %ymm1, %ymm0, %ymm0
-; AVX-NEXT:    vmovmskps %ymm0, %eax
-; AVX-NEXT:    testl %eax, %eax
-; AVX-NEXT:    setne %al
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: bool_reduction_v8f32:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vcmpneqps %ymm1, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vmovmskps %ymm0, %eax
+; AVX1OR2-NEXT:    testl %eax, %eax
+; AVX1OR2-NEXT:    setne %al
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: bool_reduction_v8f32:
 ; AVX512:       # %bb.0:
@@ -1063,12 +987,12 @@ define i1 @bool_reduction_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; SSE42-NEXT:    setne %al
 ; SSE42-NEXT:    retq
 ;
-; AVX-LABEL: bool_reduction_v2i64:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vptest %xmm0, %xmm0
-; AVX-NEXT:    setne %al
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: bool_reduction_v2i64:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vptest %xmm0, %xmm0
+; AVX1OR2-NEXT:    setne %al
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: bool_reduction_v2i64:
 ; AVX512:       # %bb.0:
@@ -1104,13 +1028,13 @@ define i1 @bool_reduction_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE42-NEXT:    setne %al
 ; SSE42-NEXT:    retq
 ;
-; AVX-LABEL: bool_reduction_v4i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm1
-; AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vptest %xmm0, %xmm0
-; AVX-NEXT:    setne %al
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: bool_reduction_v4i32:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vpminud %xmm1, %xmm0, %xmm1
+; AVX1OR2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vptest %xmm0, %xmm0
+; AVX1OR2-NEXT:    setne %al
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: bool_reduction_v4i32:
 ; AVX512:       # %bb.0:
@@ -1137,13 +1061,13 @@ define i1 @bool_reduction_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE-NEXT:    setne %al
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: bool_reduction_v8i16:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vpmovmskb %xmm0, %eax
-; AVX-NEXT:    testl %eax, %eax
-; AVX-NEXT:    setne %al
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: bool_reduction_v8i16:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm0
+; AVX1OR2-NEXT:    vpmovmskb %xmm0, %eax
+; AVX1OR2-NEXT:    testl %eax, %eax
+; AVX1OR2-NEXT:    setne %al
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: bool_reduction_v8i16:
 ; AVX512:       # %bb.0:
@@ -1172,13 +1096,13 @@ define i1 @bool_reduction_v16i8(<16 x i8> %x, <16 x i8> %y) {
 ; SSE-NEXT:    setne %al
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: bool_reduction_v16i8:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpmovmskb %xmm0, %eax
-; AVX-NEXT:    testl %eax, %eax
-; AVX-NEXT:    setne %al
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: bool_reduction_v16i8:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vpmovmskb %xmm0, %eax
+; AVX1OR2-NEXT:    testl %eax, %eax
+; AVX1OR2-NEXT:    setne %al
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512-LABEL: bool_reduction_v16i8:
 ; AVX512:       # %bb.0:


        

