[llvm] b43b129 - [X86] Add AVX1OR2 common check-prefix to vector-reduce-and-cmp.ll and vector-reduce-or-cmp.ll

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Apr 1 07:20:11 PDT 2023


Author: Simon Pilgrim
Date: 2023-04-01T15:19:56+01:00
New Revision: b43b1291f50e5682f5c87b6cee45e4097a5d95ac

URL: https://github.com/llvm/llvm-project/commit/b43b1291f50e5682f5c87b6cee45e4097a5d95ac
DIFF: https://github.com/llvm/llvm-project/commit/b43b1291f50e5682f5c87b6cee45e4097a5d95ac.diff

LOG: [X86] Add AVX1OR2 common check-prefix to vector-reduce-and-cmp.ll and vector-reduce-or-cmp.ll
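For context, a common check-prefix lets FileCheck share one block of assertions between RUN lines whose codegen is identical, so update_llc_test_checks.py can emit a single AVX1OR2 block instead of duplicated AVX1 and AVX2 blocks. A minimal sketch of the idea follows; the function name and the checked instructions are illustrative placeholders, not taken from these tests:

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2

    ; Both AVX RUN lines match this one shared block.
    define i1 @example(<2 x i64> %a0) {
    ; AVX1OR2-LABEL: example:
    ; AVX1OR2:       # %bb.0:
    ; AVX1OR2-NEXT:    vptest %xmm0, %xmm0
    ; AVX1OR2-NEXT:    sete %al
    ; AVX1OR2-NEXT:    retq
      %t = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %a0)
      %r = icmp eq i64 %t, 0
      ret i1 %r
    }
    declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)

Because the AVX1 and AVX2 runs produce identical assembly here, the autogenerator folds them under the new AVX1OR2 prefix, which is what the diff below does for trunc_v2i64 and mask_v3i1.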

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
    llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
index a3323ecd8fda..80728793bbf3 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-and-cmp.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BWVL
@@ -1178,6 +1178,7 @@ declare i8 @llvm.vector.reduce.and.v32i8(<32 x i8>)
 declare i8 @llvm.vector.reduce.and.v64i8(<64 x i8>)
 declare i8 @llvm.vector.reduce.and.v128i8(<128 x i8>)
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX1OR2: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BWVL: {{.*}}
 ; AVX512F: {{.*}}

diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
index 584bfd4a11eb..43df251f6d33 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BWVL
@@ -829,17 +829,11 @@ define i1 @trunc_v2i64(<2 x i64> %a0) {
 ; SSE41-NEXT:    sete %al
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: trunc_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX1-NEXT:    sete %al
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: trunc_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; AVX2-NEXT:    sete %al
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: trunc_v2i64:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vptest {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; AVX1OR2-NEXT:    sete %al
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512F-LABEL: trunc_v2i64:
 ; AVX512F:       # %bb.0:
@@ -1094,43 +1088,24 @@ define i32 @mask_v3i1(<3 x i32> %a, <3 x i32> %b) {
 ; SSE41-NEXT:    movl $1, %eax
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: mask_v3i1:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX1-NEXT:    vmovd %xmm0, %ecx
-; AVX1-NEXT:    orl %eax, %ecx
-; AVX1-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX1-NEXT:    orl %ecx, %eax
-; AVX1-NEXT:    testb $1, %al
-; AVX1-NEXT:    je .LBB27_2
-; AVX1-NEXT:  # %bb.1:
-; AVX1-NEXT:    xorl %eax, %eax
-; AVX1-NEXT:    retq
-; AVX1-NEXT:  .LBB27_2:
-; AVX1-NEXT:    movl $1, %eax
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: mask_v3i1:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpextrd $1, %xmm0, %eax
-; AVX2-NEXT:    vmovd %xmm0, %ecx
-; AVX2-NEXT:    orl %eax, %ecx
-; AVX2-NEXT:    vpextrd $2, %xmm0, %eax
-; AVX2-NEXT:    orl %ecx, %eax
-; AVX2-NEXT:    testb $1, %al
-; AVX2-NEXT:    je .LBB27_2
-; AVX2-NEXT:  # %bb.1:
-; AVX2-NEXT:    xorl %eax, %eax
-; AVX2-NEXT:    retq
-; AVX2-NEXT:  .LBB27_2:
-; AVX2-NEXT:    movl $1, %eax
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: mask_v3i1:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX1OR2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX1OR2-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX1OR2-NEXT:    vmovd %xmm0, %ecx
+; AVX1OR2-NEXT:    orl %eax, %ecx
+; AVX1OR2-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX1OR2-NEXT:    orl %ecx, %eax
+; AVX1OR2-NEXT:    testb $1, %al
+; AVX1OR2-NEXT:    je .LBB27_2
+; AVX1OR2-NEXT:  # %bb.1:
+; AVX1OR2-NEXT:    xorl %eax, %eax
+; AVX1OR2-NEXT:    retq
+; AVX1OR2-NEXT:  .LBB27_2:
+; AVX1OR2-NEXT:    movl $1, %eax
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512F-LABEL: mask_v3i1:
 ; AVX512F:       # %bb.0:

