[llvm] r279949 - [AVX-512] Add testcases showing that we don't emit 512-bit vpabsb/vpabsw. Will be fixed in a future commit.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sun Aug 28 15:20:46 PDT 2016


Author: ctopper
Date: Sun Aug 28 17:20:45 2016
New Revision: 279949

URL: http://llvm.org/viewvc/llvm-project?rev=279949&view=rev
Log:
[AVX-512] Add testcases showing that we don't emit 512-bit vpabsb/vpabsw. Will be fixed in a future commit.

Modified:
    llvm/trunk/test/CodeGen/X86/viabs.ll
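
The new test cases below use the standard integer abs(x) idiom that the backend pattern-matches: negate, compare against zero, then select. As a minimal standalone sketch of that IR shape (the <16 x i8> width and the function name here are illustrative only; the actual tests added in this commit use 512-bit <64 x i8> and <32 x i16> vectors):

    define <16 x i8> @abs_v16i8(<16 x i8> %a) nounwind {
      ; 0 - a
      %neg = sub <16 x i8> zeroinitializer, %a
      ; a < 0 ?
      %cmp = icmp slt <16 x i8> %a, zeroinitializer
      ; pick -a for negative lanes, a otherwise
      %abs = select <16 x i1> %cmp, <16 x i8> %neg, <16 x i8> %a
      ret <16 x i8> %abs
    }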

Modified: llvm/trunk/test/CodeGen/X86/viabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/viabs.ll?rev=279949&r1=279948&r2=279949&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/viabs.ll (original)
+++ llvm/trunk/test/CodeGen/X86/viabs.ll Sun Aug 28 17:20:45 2016
@@ -1,9 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2    | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3   | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx     | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2    | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2     | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3    | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx      | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2     | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
 
 define <4 x i32> @test1(<4 x i32> %a) nounwind {
 ; SSE2-LABEL: test1:
@@ -571,3 +572,152 @@ define <8 x i64> @test13(<8 x i64>* %a.p
   %abs = select <8 x i1> %b, <8 x i64> %tmp1neg, <8 x i64> %a
   ret <8 x i64> %abs
 }
+
+define <64 x i8> @test14(<64 x i8> %a) nounwind {
+; SSE2-LABEL: test14:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    pxor %xmm4, %xmm4
+; SSE2-NEXT:    pxor %xmm5, %xmm5
+; SSE2-NEXT:    pcmpgtb %xmm0, %xmm5
+; SSE2-NEXT:    paddb %xmm5, %xmm0
+; SSE2-NEXT:    pxor %xmm5, %xmm0
+; SSE2-NEXT:    pxor %xmm5, %xmm5
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm5
+; SSE2-NEXT:    paddb %xmm5, %xmm1
+; SSE2-NEXT:    pxor %xmm5, %xmm1
+; SSE2-NEXT:    pxor %xmm5, %xmm5
+; SSE2-NEXT:    pcmpgtb %xmm2, %xmm5
+; SSE2-NEXT:    paddb %xmm5, %xmm2
+; SSE2-NEXT:    pxor %xmm5, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm3, %xmm4
+; SSE2-NEXT:    paddb %xmm4, %xmm3
+; SSE2-NEXT:    pxor %xmm4, %xmm3
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: test14:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    pabsb %xmm0, %xmm0
+; SSSE3-NEXT:    pabsb %xmm1, %xmm1
+; SSSE3-NEXT:    pabsb %xmm2, %xmm2
+; SSSE3-NEXT:    pabsb %xmm3, %xmm3
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: test14:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtb %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm3, %xmm5
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
+; AVX1-NEXT:    vpaddb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddb %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %ymm6, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpcmpgtb %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm3, %xmm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm5
+; AVX1-NEXT:    vpaddb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vxorps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test14:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpabsb %ymm0, %ymm0
+; AVX2-NEXT:    vpabsb %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: test14:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpabsb %ymm0, %ymm0
+; AVX512F-NEXT:    vpabsb %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: test14:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpcmpgtb %zmm0, %zmm1, %k0
+; AVX512BW-NEXT:    vpmovm2b %k0, %zmm1
+; AVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %tmp1neg = sub <64 x i8> zeroinitializer, %a
+  %b = icmp slt <64 x i8> %a, zeroinitializer
+  %abs = select <64 x i1> %b, <64 x i8> %tmp1neg, <64 x i8> %a
+  ret <64 x i8> %abs
+}
+
+define <32 x i16> @test15(<32 x i16> %a) nounwind {
+; SSE2-LABEL: test15:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    psraw $15, %xmm4
+; SSE2-NEXT:    paddw %xmm4, %xmm0
+; SSE2-NEXT:    pxor %xmm4, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    psraw $15, %xmm4
+; SSE2-NEXT:    paddw %xmm4, %xmm1
+; SSE2-NEXT:    pxor %xmm4, %xmm1
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    psraw $15, %xmm4
+; SSE2-NEXT:    paddw %xmm4, %xmm2
+; SSE2-NEXT:    pxor %xmm4, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psraw $15, %xmm4
+; SSE2-NEXT:    paddw %xmm4, %xmm3
+; SSE2-NEXT:    pxor %xmm4, %xmm3
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: test15:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    pabsw %xmm0, %xmm0
+; SSSE3-NEXT:    pabsw %xmm1, %xmm1
+; SSSE3-NEXT:    pabsw %xmm2, %xmm2
+; SSSE3-NEXT:    pabsw %xmm3, %xmm3
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: test15:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpsraw $15, %xmm2, %xmm3
+; AVX1-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsraw $15, %xmm0, %xmm4
+; AVX1-NEXT:    vpaddw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm2
+; AVX1-NEXT:    vxorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpsraw $15, %xmm2, %xmm3
+; AVX1-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpsraw $15, %xmm1, %xmm4
+; AVX1-NEXT:    vpaddw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm4, %ymm2
+; AVX1-NEXT:    vxorps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: test15:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpabsw %ymm0, %ymm0
+; AVX2-NEXT:    vpabsw %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: test15:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpabsw %ymm0, %ymm0
+; AVX512F-NEXT:    vpabsw %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: test15:
+; AVX512BW:       # BB#0:
+; AVX512BW-NEXT:    vpsraw $15, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpaddw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %tmp1neg = sub <32 x i16> zeroinitializer, %a
+  %b = icmp sgt <32 x i16> %a, zeroinitializer
+  %abs = select <32 x i1> %b, <32 x i16> %a, <32 x i16> %tmp1neg
+  ret <32 x i16> %abs
+}
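
Note how the AVX512BW output for test14 and test15 still goes through the compare/add/xor expansion (vpxord, vpcmpgtb, vpmovm2b, vpaddb, vpxorq for bytes; vpsraw, vpaddw, vpxorq for words) instead of a single 512-bit absolute-value instruction. Once the promised follow-up lands and the checks are regenerated with utils/update_llc_test_checks.py, the AVX512BW lines for test14 would presumably collapse to something like the following (a guess at the future output, not part of this commit):

    ; AVX512BW-LABEL: test14:
    ; AVX512BW:       # BB#0:
    ; AVX512BW-NEXT:    vpabsb %zmm0, %zmm0
    ; AVX512BW-NEXT:    retq

with the analogous vpabsw form for test15.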

More information about the llvm-commits mailing list