[llvm-branch-commits] [llvm] 2ad2e91 - [X86] Add AVX2/AVX512 test coverage in sat-add.ll

Simon Pilgrim via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Fri Nov 27 08:16:00 PST 2020


Author: Simon Pilgrim
Date: 2020-11-27T16:11:02Z
New Revision: 2ad2e91016151b1f0a4bb0f98115f33f0d3b5766

URL: https://github.com/llvm/llvm-project/commit/2ad2e91016151b1f0a4bb0f98115f33f0d3b5766
DIFF: https://github.com/llvm/llvm-project/commit/2ad2e91016151b1f0a4bb0f98115f33f0d3b5766.diff

LOG: [X86] Add AVX2/AVX512 test coverage in sat-add.ll

Shows the failure to combine to uaddsat
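
For context, the three patterns exercised in this test (min, cmp with sum,
cmp with not) all correspond to an unsigned saturating add, which LLVM IR
can express directly with the @llvm.uadd.sat intrinsic. A minimal sketch of
the form these patterns would ideally combine to (function name is
illustrative only):

    declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)

    ; Saturating add: each lane clamps at 255 instead of wrapping. For
    ; i8/i16 elements x86 has direct instructions (paddusb/paddusw, or
    ; their VEX forms vpaddusb/vpaddusw), so this should select to a
    ; single instruction; wider elements need an expansion.
    define <16 x i8> @uaddsat_v16i8_sketch(<16 x i8> %x, <16 x i8> %y) {
      %r = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
      ret <16 x i8> %r
    }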

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/sat-add.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/sat-add.ll b/llvm/test/CodeGen/X86/sat-add.ll
index 72ccebedc7a8..1dae100e0994 100644
--- a/llvm/test/CodeGen/X86/sat-add.ll
+++ b/llvm/test/CodeGen/X86/sat-add.ll
@@ -1,7 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2   | FileCheck %s --check-prefixes=ANY,SSE2
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=ANY,SSE4,SSE41
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.2 | FileCheck %s --check-prefixes=ANY,SSE4,SSE42
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2   | FileCheck %s --check-prefixes=ANY,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=ANY,SSE,SSE4,SSE41
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.2 | FileCheck %s --check-prefixes=ANY,SSE,SSE4,SSE42
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2   | FileCheck %s --check-prefixes=ANY,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=ANY,AVX,AVX512
 
 ; There are at least 3 potential patterns corresponding to an unsigned saturated add: min, cmp with sum, cmp with not.
 ; Test each of those patterns with i8/i16/i32/i64.
@@ -360,11 +362,17 @@ define i64 @unsigned_sat_variable_i64_using_cmp_notval(i64 %x, i64 %y) {
 }
 
 define <16 x i8> @unsigned_sat_constant_v16i8_using_min(<16 x i8> %x) {
-; ANY-LABEL: unsigned_sat_constant_v16i8_using_min:
-; ANY:       # %bb.0:
-; ANY-NEXT:    pminub {{.*}}(%rip), %xmm0
-; ANY-NEXT:    paddb {{.*}}(%rip), %xmm0
-; ANY-NEXT:    retq
+; SSE-LABEL: unsigned_sat_constant_v16i8_using_min:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pminub {{.*}}(%rip), %xmm0
+; SSE-NEXT:    paddb {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: unsigned_sat_constant_v16i8_using_min:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminub {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %c = icmp ult <16 x i8> %x, <i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43>
   %s = select <16 x i1> %c, <16 x i8> %x, <16 x i8> <i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43>
   %r = add <16 x i8> %s, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
@@ -372,10 +380,24 @@ define <16 x i8> @unsigned_sat_constant_v16i8_using_min(<16 x i8> %x) {
 }
 
 define <16 x i8> @unsigned_sat_constant_v16i8_using_cmp_sum(<16 x i8> %x) {
-; ANY-LABEL: unsigned_sat_constant_v16i8_using_cmp_sum:
-; ANY:       # %bb.0:
-; ANY-NEXT:    paddusb {{.*}}(%rip), %xmm0
-; ANY-NEXT:    retq
+; SSE-LABEL: unsigned_sat_constant_v16i8_using_cmp_sum:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusb {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v16i8_using_cmp_sum:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v16i8_using_cmp_sum:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
   %c = icmp ugt <16 x i8> %x, %a
   %r = select <16 x i1> %c, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %a
@@ -383,10 +405,23 @@ define <16 x i8> @unsigned_sat_constant_v16i8_using_cmp_sum(<16 x i8> %x) {
 }
 
 define <16 x i8> @unsigned_sat_constant_v16i8_using_cmp_notval(<16 x i8> %x) {
-; ANY-LABEL: unsigned_sat_constant_v16i8_using_cmp_notval:
-; ANY:       # %bb.0:
-; ANY-NEXT:    paddusb {{.*}}(%rip), %xmm0
-; ANY-NEXT:    retq
+; SSE-LABEL: unsigned_sat_constant_v16i8_using_cmp_notval:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusb {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v16i8_using_cmp_notval:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusb {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v16i8_using_cmp_notval:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddb {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT:    vpmaxub {{.*}}(%rip), %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %a = add <16 x i8> %x, <i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42, i8 42>
   %c = icmp ugt <16 x i8> %x, <i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43, i8 -43>
   %r = select <16 x i1> %c, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %a
@@ -407,6 +442,12 @@ define <8 x i16> @unsigned_sat_constant_v8i16_using_min(<8 x i16> %x) {
 ; SSE4-NEXT:    pminuw {{.*}}(%rip), %xmm0
 ; SSE4-NEXT:    paddw {{.*}}(%rip), %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX-LABEL: unsigned_sat_constant_v8i16_using_min:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminuw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %c = icmp ult <8 x i16> %x, <i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43>
   %s = select <8 x i1> %c, <8 x i16> %x, <8 x i16> <i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43>
   %r = add <8 x i16> %s, <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>
@@ -414,10 +455,24 @@ define <8 x i16> @unsigned_sat_constant_v8i16_using_min(<8 x i16> %x) {
 }
 
 define <8 x i16> @unsigned_sat_constant_v8i16_using_cmp_sum(<8 x i16> %x) {
-; ANY-LABEL: unsigned_sat_constant_v8i16_using_cmp_sum:
-; ANY:       # %bb.0:
-; ANY-NEXT:    paddusw {{.*}}(%rip), %xmm0
-; ANY-NEXT:    retq
+; SSE-LABEL: unsigned_sat_constant_v8i16_using_cmp_sum:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v8i16_using_cmp_sum:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v8i16_using_cmp_sum:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <8 x i16> %x, <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>
   %c = icmp ugt <8 x i16> %x, %a
   %r = select <8 x i1> %c, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %a
@@ -425,10 +480,23 @@ define <8 x i16> @unsigned_sat_constant_v8i16_using_cmp_sum(<8 x i16> %x) {
 }
 
 define <8 x i16> @unsigned_sat_constant_v8i16_using_cmp_notval(<8 x i16> %x) {
-; ANY-LABEL: unsigned_sat_constant_v8i16_using_cmp_notval:
-; ANY:       # %bb.0:
-; ANY-NEXT:    paddusw {{.*}}(%rip), %xmm0
-; ANY-NEXT:    retq
+; SSE-LABEL: unsigned_sat_constant_v8i16_using_cmp_notval:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v8i16_using_cmp_notval:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v8i16_using_cmp_notval:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT:    vpmaxuw {{.*}}(%rip), %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %a = add <8 x i16> %x, <i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42, i16 42>
   %c = icmp ugt <8 x i16> %x, <i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43, i16 -43>
   %r = select <8 x i1> %c, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %a
@@ -453,6 +521,20 @@ define <4 x i32> @unsigned_sat_constant_v4i32_using_min(<4 x i32> %x) {
 ; SSE4-NEXT:    pminud {{.*}}(%rip), %xmm0
 ; SSE4-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v4i32_using_min:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967253,4294967253,4294967253,4294967253]
+; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [42,42,42,42]
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v4i32_using_min:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %c = icmp ult <4 x i32> %x, <i32 -43, i32 -43, i32 -43, i32 -43>
   %s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> <i32 -43, i32 -43, i32 -43, i32 -43>
   %r = add <4 x i32> %s, <i32 42, i32 42, i32 42, i32 42>
@@ -483,6 +565,26 @@ define <4 x i32> @unsigned_sat_constant_v4i32_using_cmp_sum(<4 x i32> %x) {
 ; SSE4-NEXT:    por %xmm2, %xmm1
 ; SSE4-NEXT:    movdqa %xmm1, %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v4i32_using_cmp_sum:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [42,42,42,42]
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v4i32_using_cmp_sum:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm1
+; AVX512-NEXT:    vpcmpnleud %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <4 x i32> %x, <i32 42, i32 42, i32 42, i32 42>
   %c = icmp ugt <4 x i32> %x, %a
   %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
@@ -508,6 +610,25 @@ define <4 x i32> @unsigned_sat_constant_v4i32_using_cmp_notval(<4 x i32> %x) {
 ; SSE4-NEXT:    pcmpeqd %xmm2, %xmm0
 ; SSE4-NEXT:    por %xmm1, %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [42,42,42,42]
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [4294967254,4294967254,4294967254,4294967254]
+; AVX2-NEXT:    vpmaxud %xmm2, %xmm0, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm1
+; AVX512-NEXT:    vpcmpnleud {{.*}}(%rip){1to4}, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <4 x i32> %x, <i32 42, i32 42, i32 42, i32 42>
   %c = icmp ugt <4 x i32> %x, <i32 -43, i32 -43, i32 -43, i32 -43>
   %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
@@ -533,6 +654,23 @@ define <4 x i32> @unsigned_sat_constant_v4i32_using_cmp_notval_nonsplat(<4 x i32
 ; SSE4-NEXT:    pcmpeqd %xmm2, %xmm0
 ; SSE4-NEXT:    por %xmm1, %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval_nonsplat:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpmaxud {{.*}}(%rip), %xmm0, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v4i32_using_cmp_notval_nonsplat:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT:    vpcmpnleud {{.*}}(%rip), %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <4 x i32> %x, <i32 43, i32 44, i32 45, i32 46>
   %c = icmp ugt <4 x i32> %x, <i32 -44, i32 -45, i32 -46, i32 -47>
   %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
@@ -589,6 +727,22 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_min(<2 x i64> %x) {
 ; SSE42-NEXT:    paddq {{.*}}(%rip), %xmm2
 ; SSE42-NEXT:    movdqa %xmm2, %xmm0
 ; SSE42-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v2i64_using_min:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovapd {{.*#+}} xmm1 = [18446744073709551573,18446744073709551573]
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775765,9223372036854775765]
+; AVX2-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX2-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v2i64_using_min:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpminuq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %c = icmp ult <2 x i64> %x, <i64 -43, i64 -43>
   %s = select <2 x i1> %c, <2 x i64> %x, <2 x i64> <i64 -43, i64 -43>
   %r = add <2 x i64> %s, <i64 42, i64 42>
@@ -642,6 +796,25 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_sum(<2 x i64> %x) {
 ; SSE42-NEXT:    pcmpgtq %xmm2, %xmm0
 ; SSE42-NEXT:    por %xmm1, %xmm0
 ; SSE42-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v2i64_using_cmp_sum:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
+; AVX2-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v2i64_using_cmp_sum:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT:    vpcmpnleuq %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa64 %xmm0, %xmm1 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <2 x i64> %x, <i64 42, i64 42>
   %c = icmp ugt <2 x i64> %x, %a
   %r = select <2 x i1> %c, <2 x i64> <i64 -1, i64 -1>, <2 x i64> %a
@@ -691,6 +864,23 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_notval(<2 x i64> %x) {
 ; SSE42-NEXT:    pcmpgtq {{.*}}(%rip), %xmm0
 ; SSE42-NEXT:    por %xmm1, %xmm0
 ; SSE42-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_constant_v2i64_using_cmp_notval:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtq {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_constant_v2i64_using_cmp_notval:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT:    vpcmpnleuq {{.*}}(%rip), %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa64 %xmm0, %xmm1 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <2 x i64> %x, <i64 42, i64 42>
   %c = icmp ugt <2 x i64> %x, <i64 -43, i64 -43>
   %r = select <2 x i1> %c, <2 x i64> <i64 -1, i64 -1>, <2 x i64> %a
@@ -698,13 +888,29 @@ define <2 x i64> @unsigned_sat_constant_v2i64_using_cmp_notval(<2 x i64> %x) {
 }
 
 define <16 x i8> @unsigned_sat_variable_v16i8_using_min(<16 x i8> %x, <16 x i8> %y) {
-; ANY-LABEL: unsigned_sat_variable_v16i8_using_min:
-; ANY:       # %bb.0:
-; ANY-NEXT:    pcmpeqd %xmm2, %xmm2
-; ANY-NEXT:    pxor %xmm1, %xmm2
-; ANY-NEXT:    pminub %xmm2, %xmm0
-; ANY-NEXT:    paddb %xmm1, %xmm0
-; ANY-NEXT:    retq
+; SSE-LABEL: unsigned_sat_variable_v16i8_using_min:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    pxor %xmm1, %xmm2
+; SSE-NEXT:    pminub %xmm2, %xmm0
+; SSE-NEXT:    paddb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v16i8_using_min:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
+; AVX2-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v16i8_using_min:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpminub %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %noty = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %c = icmp ult <16 x i8> %x, %noty
   %s = select <16 x i1> %c, <16 x i8> %x, <16 x i8> %noty
@@ -713,10 +919,24 @@ define <16 x i8> @unsigned_sat_variable_v16i8_using_min(<16 x i8> %x, <16 x i8>
 }
 
 define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_sum(<16 x i8> %x, <16 x i8> %y) {
-; ANY-LABEL: unsigned_sat_variable_v16i8_using_cmp_sum:
-; ANY:       # %bb.0:
-; ANY-NEXT:    paddusb %xmm1, %xmm0
-; ANY-NEXT:    retq
+; SSE-LABEL: unsigned_sat_variable_v16i8_using_cmp_sum:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusb %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v16i8_using_cmp_sum:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v16i8_using_cmp_sum:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <16 x i8> %x, %y
   %c = icmp ugt <16 x i8> %x, %a
   %r = select <16 x i1> %c, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> %a
@@ -724,17 +944,38 @@ define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_sum(<16 x i8> %x, <16 x
 }
 
 define <16 x i8> @unsigned_sat_variable_v16i8_using_cmp_notval(<16 x i8> %x, <16 x i8> %y) {
-; ANY-LABEL: unsigned_sat_variable_v16i8_using_cmp_notval:
-; ANY:       # %bb.0:
-; ANY-NEXT:    pcmpeqd %xmm2, %xmm2
-; ANY-NEXT:    movdqa %xmm0, %xmm3
-; ANY-NEXT:    paddb %xmm1, %xmm3
-; ANY-NEXT:    pxor %xmm2, %xmm1
-; ANY-NEXT:    pminub %xmm0, %xmm1
-; ANY-NEXT:    pcmpeqb %xmm1, %xmm0
-; ANY-NEXT:    pxor %xmm2, %xmm0
-; ANY-NEXT:    por %xmm3, %xmm0
-; ANY-NEXT:    retq
+; SSE-LABEL: unsigned_sat_variable_v16i8_using_cmp_notval:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    paddb %xmm1, %xmm3
+; SSE-NEXT:    pxor %xmm2, %xmm1
+; SSE-NEXT:    pminub %xmm0, %xmm1
+; SSE-NEXT:    pcmpeqb %xmm1, %xmm0
+; SSE-NEXT:    pxor %xmm2, %xmm0
+; SSE-NEXT:    por %xmm3, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v16i8_using_cmp_notval:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpminub %xmm3, %xmm0, %xmm3
+; AVX2-NEXT:    vpcmpeqb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v16i8_using_cmp_notval:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpaddb %xmm1, %xmm0, %xmm3
+; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm1
+; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm3, %xmm0
+; AVX512-NEXT:    retq
   %noty = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %a = add <16 x i8> %x, %y
   %c = icmp ugt <16 x i8> %x, %noty
@@ -760,6 +1001,22 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_min(<8 x i16> %x, <8 x i16>
 ; SSE4-NEXT:    pminuw %xmm2, %xmm0
 ; SSE4-NEXT:    paddw %xmm1, %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v8i16_using_min:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
+; AVX2-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v8i16_using_min:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %noty = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %c = icmp ult <8 x i16> %x, %noty
   %s = select <8 x i1> %c, <8 x i16> %x, <8 x i16> %noty
@@ -768,10 +1025,24 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_min(<8 x i16> %x, <8 x i16>
 }
 
 define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_sum(<8 x i16> %x, <8 x i16> %y) {
-; ANY-LABEL: unsigned_sat_variable_v8i16_using_cmp_sum:
-; ANY:       # %bb.0:
-; ANY-NEXT:    paddusw %xmm1, %xmm0
-; ANY-NEXT:    retq
+; SSE-LABEL: unsigned_sat_variable_v8i16_using_cmp_sum:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddusw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v8i16_using_cmp_sum:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v8i16_using_cmp_sum:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm1
+; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm2
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <8 x i16> %x, %y
   %c = icmp ugt <8 x i16> %x, %a
   %r = select <8 x i1> %c, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> %a
@@ -800,6 +1071,27 @@ define <8 x i16> @unsigned_sat_variable_v8i16_using_cmp_notval(<8 x i16> %x, <8
 ; SSE4-NEXT:    pxor %xmm2, %xmm0
 ; SSE4-NEXT:    por %xmm3, %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v8i16_using_cmp_notval:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpaddw %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpminuw %xmm3, %xmm0, %xmm3
+; AVX2-NEXT:    vpcmpeqw %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v8i16_using_cmp_notval:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpaddw %xmm1, %xmm0, %xmm3
+; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpminuw %xmm1, %xmm0, %xmm1
+; AVX512-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpternlogq $222, %xmm2, %xmm3, %xmm0
+; AVX512-NEXT:    retq
   %noty = xor <8 x i16> %y, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %a = add <8 x i16> %x, %y
   %c = icmp ugt <8 x i16> %x, %noty
@@ -831,6 +1123,22 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_min(<4 x i32> %x, <4 x i32>
 ; SSE4-NEXT:    pminud %xmm2, %xmm0
 ; SSE4-NEXT:    paddd %xmm1, %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v4i32_using_min:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
+; AVX2-NEXT:    vpminud %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v4i32_using_min:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpminud %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %noty = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
   %c = icmp ult <4 x i32> %x, %noty
   %s = select <4 x i1> %c, <4 x i32> %x, <4 x i32> %noty
@@ -860,6 +1168,25 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_sum(<4 x i32> %x, <4 x i
 ; SSE4-NEXT:    por %xmm1, %xmm2
 ; SSE4-NEXT:    movdqa %xmm2, %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v4i32_using_cmp_sum:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v4i32_using_cmp_sum:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX512-NEXT:    vpcmpnleud %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa32 %xmm0, %xmm1 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <4 x i32> %x, %y
   %c = icmp ugt <4 x i32> %x, %a
   %r = select <4 x i1> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %a
@@ -888,6 +1215,27 @@ define <4 x i32> @unsigned_sat_variable_v4i32_using_cmp_notval(<4 x i32> %x, <4
 ; SSE4-NEXT:    pxor %xmm2, %xmm0
 ; SSE4-NEXT:    por %xmm3, %xmm0
 ; SSE4-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v4i32_using_cmp_notval:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpminud %xmm3, %xmm0, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v4i32_using_cmp_notval:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm2
+; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpcmpnleud %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vmovdqa32 %xmm3, %xmm2 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm2, %xmm0
+; AVX512-NEXT:    retq
   %noty = xor <4 x i32> %y, <i32 -1, i32 -1, i32 -1, i32 -1>
   %a = add <4 x i32> %x, %y
   %c = icmp ugt <4 x i32> %x, %noty
@@ -953,6 +1301,25 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_min(<2 x i64> %x, <2 x i64>
 ; SSE42-NEXT:    paddq %xmm1, %xmm3
 ; SSE42-NEXT:    movdqa %xmm3, %xmm0
 ; SSE42-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v2i64_using_min:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm3
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm1, %xmm4
+; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX2-NEXT:    vblendvpd %xmm3, %xmm0, %xmm2, %xmm0
+; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v2i64_using_min:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm2
+; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm2
+; AVX512-NEXT:    vpminuq %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    retq
   %noty = xor <2 x i64> %y, <i64 -1, i64 -1>
   %c = icmp ult <2 x i64> %x, %noty
   %s = select <2 x i1> %c, <2 x i64> %x, <2 x i64> %noty
@@ -1004,6 +1371,25 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_sum(<2 x i64> %x, <2 x i
 ; SSE42-NEXT:    pcmpgtq %xmm2, %xmm0
 ; SSE42-NEXT:    por %xmm1, %xmm0
 ; SSE42-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm2
+; AVX2-NEXT:    vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v2i64_using_cmp_sum:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm1
+; AVX512-NEXT:    vpcmpnleuq %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa64 %xmm0, %xmm1 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm1, %xmm0
+; AVX512-NEXT:    retq
   %a = add <2 x i64> %x, %y
   %c = icmp ugt <2 x i64> %x, %a
   %r = select <2 x i1> %c, <2 x i64> <i64 -1, i64 -1>, <2 x i64> %a
@@ -1054,6 +1440,25 @@ define <2 x i64> @unsigned_sat_variable_v2i64_using_cmp_notval(<2 x i64> %x, <2
 ; SSE42-NEXT:    pcmpgtq %xmm1, %xmm0
 ; SSE42-NEXT:    por %xmm2, %xmm0
 ; SSE42-NEXT:    retq
+;
+; AVX2-LABEL: unsigned_sat_variable_v2i64_using_cmp_notval:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: unsigned_sat_variable_v2i64_using_cmp_notval:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX512-NEXT:    vpaddq %xmm1, %xmm0, %xmm2
+; AVX512-NEXT:    vpternlogq $15, %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vpcmpnleuq %xmm1, %xmm0, %k1
+; AVX512-NEXT:    vmovdqa64 %xmm3, %xmm2 {%k1}
+; AVX512-NEXT:    vmovdqa %xmm2, %xmm0
+; AVX512-NEXT:    retq
   %noty = xor <2 x i64> %y, <i64 -1, i64 -1>
   %a = add <2 x i64> %x, %y
   %c = icmp ugt <2 x i64> %x, %noty
