[llvm-branch-commits] [llvm] b99782c - [X86][AVX] Adjust unsigned saturation downconvert negative test

Simon Pilgrim via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Thu Jan 14 10:02:14 PST 2021


Author: Simon Pilgrim
Date: 2021-01-14T17:51:23Z
New Revision: b99782cf7850a481fa36fd95ae04923739e0da6d

URL: https://github.com/llvm/llvm-project/commit/b99782cf7850a481fa36fd95ae04923739e0da6d
DIFF: https://github.com/llvm/llvm-project/commit/b99782cf7850a481fa36fd95ae04923739e0da6d.diff

LOG: [X86][AVX] Adjust unsigned saturation downconvert negative test

D87145 showed that this test (added in D45315) could always be constant-folded (with suitable value tracking).

What we actually needed was smax(smin()) negative test coverage, the inverse of negative_test2_smax_usat_trunc_wb_256_mem, so I've tweaked the test to provide that instead.
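
For illustration, a minimal scalar sketch (hypothetical i16 functions, not part of the actual test, which operates on <16 x i16>) of why the old pattern could be folded away and why the replacement cannot:

  ; Old pattern: smax(%i, 0) makes the value non-negative, so the following
  ; smin(%x, -1) always selects -1 and the whole expression constant folds.
  define i16 @old_pattern(i16 %i) {
    %c1 = icmp sgt i16 %i, 0
    %x  = select i1 %c1, i16 %i, i16 0      ; smax(%i, 0)  -> %x >= 0
    %c2 = icmp slt i16 %x, -1
    %r  = select i1 %c2, i16 %x, i16 -1     ; smin(%x, -1) -> always -1
    ret i16 %r
  }

  ; New pattern: smax(smin(%i, -5), -10) clamps to [-10, -5]. It is a genuine
  ; clamp, but not the unsigned-saturation shape, so it stays a negative test.
  define i16 @new_pattern(i16 %i) {
    %c1 = icmp slt i16 %i, -5
    %x  = select i1 %c1, i16 %i, i16 -5     ; smin(%i, -5)
    %c2 = icmp sgt i16 %x, -10
    %r  = select i1 %c2, i16 %x, i16 -10    ; smax(%x, -10)
    ret i16 %r
  }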

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx512-trunc.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/avx512-trunc.ll b/llvm/test/CodeGen/X86/avx512-trunc.ll
index 0b2a47c2772c..d61ada4e5d05 100644
--- a/llvm/test/CodeGen/X86/avx512-trunc.ll
+++ b/llvm/test/CodeGen/X86/avx512-trunc.ll
@@ -1007,10 +1007,8 @@ define <16 x i16> @smax_usat_trunc_dw_512(<16 x i32> %i) {
 define void @negative_test1_smax_usat_trunc_wb_256_mem(<16 x i16> %i, <16 x i8>* %res) {
 ; KNL-LABEL: negative_test1_smax_usat_trunc_wb_256_mem:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
-; KNL-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; KNL-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
+; KNL-NEXT:    vpminsw {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT:    vpmaxsw {{.*}}(%rip), %ymm0, %ymm0
 ; KNL-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; KNL-NEXT:    vpmovdb %zmm0, (%rdi)
 ; KNL-NEXT:    vzeroupper
@@ -1018,17 +1016,15 @@ define void @negative_test1_smax_usat_trunc_wb_256_mem(<16 x i16> %i, <16 x i8>*
 ;
 ; SKX-LABEL: negative_test1_smax_usat_trunc_wb_256_mem:
 ; SKX:       ## %bb.0:
-; SKX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; SKX-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm0
-; SKX-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; SKX-NEXT:    vpminsw %ymm1, %ymm0, %ymm0
+; SKX-NEXT:    vpminsw {{.*}}(%rip), %ymm0, %ymm0
+; SKX-NEXT:    vpmaxsw {{.*}}(%rip), %ymm0, %ymm0
 ; SKX-NEXT:    vpmovwb %ymm0, (%rdi)
 ; SKX-NEXT:    vzeroupper
 ; SKX-NEXT:    retq
-  %x1 = icmp sgt <16 x i16> %i, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
-  %x2 = select <16 x i1> %x1, <16 x i16> %i, <16 x i16> <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
-  %x3 = icmp slt <16 x i16> %x2, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
-  %x5 = select <16 x i1> %x3, <16 x i16> %x2, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  %x1 = icmp slt <16 x i16> %i, <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>
+  %x2 = select <16 x i1> %x1, <16 x i16> %i, <16 x i16> <i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5, i16 -5>
+  %x3 = icmp sgt <16 x i16> %x2, <i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10>
+  %x5 = select <16 x i1> %x3, <16 x i16> %x2, <16 x i16> <i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10, i16 -10>
   %x6 = trunc <16 x i16> %x5 to <16 x i8>
   store <16 x i8> %x6, <16 x i8>* %res, align 1
   ret void


        

