[llvm] 2315410 - [X86] Cleanup overflow test check prefixes. NFCI.

Simon Pilgrim via llvm-commits <llvm-commits@lists.llvm.org>
Tue Feb 23 05:34:52 PST 2021


Author: Simon Pilgrim
Date: 2021-02-23T13:34:06Z
New Revision: 2315410f578c2c62c224a1665fe4597bef1b4029

URL: https://github.com/llvm/llvm-project/commit/2315410f578c2c62c224a1665fe4597bef1b4029
DIFF: https://github.com/llvm/llvm-project/commit/2315410f578c2c62c224a1665fe4597bef1b4029.diff

LOG: [X86] Cleanup overflow test check prefixes. NFCI.

Tidy up the check prefixes to improve reuse: add a common CHECK prefix to every
RUN line and drop per-subtarget prefixes (e.g. AVX1/AVX2) wherever the generated
code is identical, so shared codegen is checked once instead of being duplicated
per configuration.
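
For readers unfamiliar with the mechanism: FileCheck's --check-prefixes option
lets one RUN line activate several prefix families at once, so assembly that is
identical across configurations only needs to be written under the shared
prefix. A minimal sketch of the pattern used throughout this patch (the
function below is hypothetical, not part of the commit): AVX1 and AVX2 produce
the same code here, so the common CHECK prefix covers both RUN lines and
update_llc_test_checks.py emits a single block instead of two near-identical
ones.

; Hypothetical example, not from the patch:
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2

define i32 @example_add(i32 %a0, i32 %a1) nounwind {
; CHECK-LABEL: example_add:
; CHECK:       # %bb.0:
; CHECK-NEXT:    leal (%rdi,%rsi), %eax
; CHECK-NEXT:    retq
  %r = add i32 %a0, %a1
  ret i32 %r
}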

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/vec_saddo.ll
    llvm/test/CodeGen/X86/vec_smulo.ll
    llvm/test/CodeGen/X86/vec_ssubo.ll
    llvm/test/CodeGen/X86/vec_uaddo.ll
    llvm/test/CodeGen/X86/vec_umulo.ll
    llvm/test/CodeGen/X86/vec_usubo.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vec_saddo.ll b/llvm/test/CodeGen/X86/vec_saddo.ll
index e89bd8f81a28..cba1057ec46b 100644
--- a/llvm/test/CodeGen/X86/vec_saddo.ll
+++ b/llvm/test/CodeGen/X86/vec_saddo.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX512
 
 declare {<1 x i32>, <1 x i1>} @llvm.sadd.with.overflow.v1i32(<1 x i32>, <1 x i32>)
 declare {<2 x i32>, <2 x i1>} @llvm.sadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)
@@ -23,23 +23,14 @@ declare {<4 x i1>, <4 x i1>} @llvm.sadd.with.overflow.v4i1(<4 x i1>, <4 x i1>)
 declare {<2 x i128>, <2 x i1>} @llvm.sadd.with.overflow.v2i128(<2 x i128>, <2 x i128>)
 
 define <1 x i32> @saddo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
-; SSE-LABEL: saddo_v1i32:
-; SSE:       # %bb.0:
-; SSE-NEXT:    xorl %eax, %eax
-; SSE-NEXT:    addl %esi, %edi
-; SSE-NEXT:    seto %al
-; SSE-NEXT:    negl %eax
-; SSE-NEXT:    movl %edi, (%rdx)
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: saddo_v1i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    addl %esi, %edi
-; AVX-NEXT:    seto %al
-; AVX-NEXT:    negl %eax
-; AVX-NEXT:    movl %edi, (%rdx)
-; AVX-NEXT:    retq
+; CHECK-LABEL: saddo_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    addl %esi, %edi
+; CHECK-NEXT:    seto %al
+; CHECK-NEXT:    negl %eax
+; CHECK-NEXT:    movl %edi, (%rdx)
+; CHECK-NEXT:    retq
   %t = call {<1 x i32>, <1 x i1>} @llvm.sadd.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1)
   %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
   %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
@@ -59,25 +50,15 @@ define <2 x i32> @saddo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun
 ; SSE-NEXT:    movq %xmm1, (%rdi)
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: saddo_v2i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vmovq %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: saddo_v2i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm2
-; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vmovq %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: saddo_v2i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmovq %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: saddo_v2i32:
 ; AVX512:       # %bb.0:
@@ -134,27 +115,16 @@ define <3 x i32> @saddo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; SSE41-NEXT:    movq %xmm1, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: saddo_v3i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
-; AVX1-NEXT:    vmovq %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: saddo_v3i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm2
-; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
-; AVX2-NEXT:    vmovq %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: saddo_v3i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
+; AVX-NEXT:    vmovq %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: saddo_v3i32:
 ; AVX512:       # %bb.0:
@@ -187,25 +157,15 @@ define <4 x i32> @saddo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) noun
 ; SSE-NEXT:    movdqa %xmm1, (%rdi)
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: saddo_v4i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: saddo_v4i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm2
-; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: saddo_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: saddo_v4i32:
 ; AVX512:       # %bb.0:
@@ -827,27 +787,16 @@ define <2 x i32> @saddo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: saddo_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm2
-; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: saddo_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm2
-; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: saddo_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtq %xmm1, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: saddo_v2i64:
 ; AVX512:       # %bb.0:
@@ -971,65 +920,35 @@ define <4 x i32> @saddo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
 ; SSE41-NEXT:    movb %sil, 2(%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: saddo_v4i24:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpslld $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpslld $8, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $8, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpslld $8, %xmm1, %xmm0
-; AVX1-NEXT:    vpsrad $8, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX1-NEXT:    movw %ax, 9(%rdi)
-; AVX1-NEXT:    vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT:    movw %cx, 6(%rdi)
-; AVX1-NEXT:    vpextrd $1, %xmm1, %edx
-; AVX1-NEXT:    movw %dx, 3(%rdi)
-; AVX1-NEXT:    vmovd %xmm1, %esi
-; AVX1-NEXT:    movw %si, (%rdi)
-; AVX1-NEXT:    shrl $16, %eax
-; AVX1-NEXT:    movb %al, 11(%rdi)
-; AVX1-NEXT:    shrl $16, %ecx
-; AVX1-NEXT:    movb %cl, 8(%rdi)
-; AVX1-NEXT:    shrl $16, %edx
-; AVX1-NEXT:    movb %dl, 5(%rdi)
-; AVX1-NEXT:    shrl $16, %esi
-; AVX1-NEXT:    movb %sil, 2(%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: saddo_v4i24:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpslld $8, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrad $8, %xmm1, %xmm1
-; AVX2-NEXT:    vpslld $8, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrad $8, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpslld $8, %xmm1, %xmm0
-; AVX2-NEXT:    vpsrad $8, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX2-NEXT:    movw %ax, 9(%rdi)
-; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx
-; AVX2-NEXT:    movw %cx, 6(%rdi)
-; AVX2-NEXT:    vpextrd $1, %xmm1, %edx
-; AVX2-NEXT:    movw %dx, 3(%rdi)
-; AVX2-NEXT:    vmovd %xmm1, %esi
-; AVX2-NEXT:    movw %si, (%rdi)
-; AVX2-NEXT:    shrl $16, %eax
-; AVX2-NEXT:    movb %al, 11(%rdi)
-; AVX2-NEXT:    shrl $16, %ecx
-; AVX2-NEXT:    movb %cl, 8(%rdi)
-; AVX2-NEXT:    shrl $16, %edx
-; AVX2-NEXT:    movb %dl, 5(%rdi)
-; AVX2-NEXT:    shrl $16, %esi
-; AVX2-NEXT:    movb %sil, 2(%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: saddo_v4i24:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslld $8, %xmm1, %xmm1
+; AVX-NEXT:    vpsrad $8, %xmm1, %xmm1
+; AVX-NEXT:    vpslld $8, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $8, %xmm0, %xmm0
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpslld $8, %xmm1, %xmm0
+; AVX-NEXT:    vpsrad $8, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpextrd $3, %xmm1, %eax
+; AVX-NEXT:    movw %ax, 9(%rdi)
+; AVX-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX-NEXT:    movw %cx, 6(%rdi)
+; AVX-NEXT:    vpextrd $1, %xmm1, %edx
+; AVX-NEXT:    movw %dx, 3(%rdi)
+; AVX-NEXT:    vmovd %xmm1, %esi
+; AVX-NEXT:    movw %si, (%rdi)
+; AVX-NEXT:    shrl $16, %eax
+; AVX-NEXT:    movb %al, 11(%rdi)
+; AVX-NEXT:    shrl $16, %ecx
+; AVX-NEXT:    movb %cl, 8(%rdi)
+; AVX-NEXT:    shrl $16, %edx
+; AVX-NEXT:    movb %dl, 5(%rdi)
+; AVX-NEXT:    shrl $16, %esi
+; AVX-NEXT:    movb %sil, 2(%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: saddo_v4i24:
 ; AVX512:       # %bb.0:
@@ -1086,37 +1005,21 @@ define <4 x i32> @saddo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: saddo_v4i1:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovmskps %xmm1, %eax
-; AVX1-NEXT:    movb %al, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: saddo_v4i1:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm1
-; AVX2-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpslld $31, %xmm0, %xmm1
-; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm2
-; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vmovmskps %xmm1, %eax
-; AVX2-NEXT:    movb %al, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: saddo_v4i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslld $31, %xmm1, %xmm1
+; AVX-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpslld $31, %xmm0, %xmm1
+; AVX-NEXT:    vpsrad $31, %xmm1, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vmovmskps %xmm1, %eax
+; AVX-NEXT:    movb %al, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: saddo_v4i1:
 ; AVX512:       # %bb.0:
@@ -1209,47 +1112,26 @@ define <2 x i32> @saddo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE41-NEXT:    movq %rsi, 8(%r10)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: saddo_v2i128:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX1-NEXT:    addq %r8, %rdi
-; AVX1-NEXT:    adcq %r9, %rsi
-; AVX1-NEXT:    seto %r8b
-; AVX1-NEXT:    addq {{[0-9]+}}(%rsp), %rdx
-; AVX1-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
-; AVX1-NEXT:    seto %al
-; AVX1-NEXT:    movzbl %al, %r9d
-; AVX1-NEXT:    negl %r9d
-; AVX1-NEXT:    movzbl %r8b, %eax
-; AVX1-NEXT:    negl %eax
-; AVX1-NEXT:    vmovd %eax, %xmm0
-; AVX1-NEXT:    vpinsrd $1, %r9d, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rdx, 16(%r10)
-; AVX1-NEXT:    movq %rdi, (%r10)
-; AVX1-NEXT:    movq %rcx, 24(%r10)
-; AVX1-NEXT:    movq %rsi, 8(%r10)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: saddo_v2i128:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX2-NEXT:    addq %r8, %rdi
-; AVX2-NEXT:    adcq %r9, %rsi
-; AVX2-NEXT:    seto %r8b
-; AVX2-NEXT:    addq {{[0-9]+}}(%rsp), %rdx
-; AVX2-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
-; AVX2-NEXT:    seto %al
-; AVX2-NEXT:    movzbl %al, %r9d
-; AVX2-NEXT:    negl %r9d
-; AVX2-NEXT:    movzbl %r8b, %eax
-; AVX2-NEXT:    negl %eax
-; AVX2-NEXT:    vmovd %eax, %xmm0
-; AVX2-NEXT:    vpinsrd $1, %r9d, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rdx, 16(%r10)
-; AVX2-NEXT:    movq %rdi, (%r10)
-; AVX2-NEXT:    movq %rcx, 24(%r10)
-; AVX2-NEXT:    movq %rsi, 8(%r10)
-; AVX2-NEXT:    retq
+; AVX-LABEL: saddo_v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX-NEXT:    addq %r8, %rdi
+; AVX-NEXT:    adcq %r9, %rsi
+; AVX-NEXT:    seto %r8b
+; AVX-NEXT:    addq {{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    seto %al
+; AVX-NEXT:    movzbl %al, %r9d
+; AVX-NEXT:    negl %r9d
+; AVX-NEXT:    movzbl %r8b, %eax
+; AVX-NEXT:    negl %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpinsrd $1, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    movq %rdx, 16(%r10)
+; AVX-NEXT:    movq %rdi, (%r10)
+; AVX-NEXT:    movq %rcx, 24(%r10)
+; AVX-NEXT:    movq %rsi, 8(%r10)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: saddo_v2i128:
 ; AVX512:       # %bb.0:

diff --git a/llvm/test/CodeGen/X86/vec_smulo.ll b/llvm/test/CodeGen/X86/vec_smulo.ll
index 746f9e5e642e..0941810ccb30 100644
--- a/llvm/test/CodeGen/X86/vec_smulo.ll
+++ b/llvm/test/CodeGen/X86/vec_smulo.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX512
 
 declare {<1 x i32>, <1 x i1>} @llvm.smul.with.overflow.v1i32(<1 x i32>, <1 x i32>)
 declare {<2 x i32>, <2 x i1>} @llvm.smul.with.overflow.v2i32(<2 x i32>, <2 x i32>)
@@ -25,23 +25,14 @@ declare {<4 x i1>, <4 x i1>} @llvm.smul.with.overflow.v4i1(<4 x i1>, <4 x i1>)
 declare {<2 x i128>, <2 x i1>} @llvm.smul.with.overflow.v2i128(<2 x i128>, <2 x i128>)
 
 define <1 x i32> @smulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
-; SSE-LABEL: smulo_v1i32:
-; SSE:       # %bb.0:
-; SSE-NEXT:    xorl %eax, %eax
-; SSE-NEXT:    imull %esi, %edi
-; SSE-NEXT:    seto %al
-; SSE-NEXT:    negl %eax
-; SSE-NEXT:    movl %edi, (%rdx)
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: smulo_v1i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    imull %esi, %edi
-; AVX-NEXT:    seto %al
-; AVX-NEXT:    negl %eax
-; AVX-NEXT:    movl %edi, (%rdx)
-; AVX-NEXT:    retq
+; CHECK-LABEL: smulo_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    imull %esi, %edi
+; CHECK-NEXT:    seto %al
+; CHECK-NEXT:    negl %eax
+; CHECK-NEXT:    movl %edi, (%rdx)
+; CHECK-NEXT:    retq
   %t = call {<1 x i32>, <1 x i1>} @llvm.smul.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1)
   %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
   %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
@@ -3042,51 +3033,28 @@ define <2 x i32> @smulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
 ; SSE41-NEXT:    movdqa %xmm1, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: smulo_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovq %xmm1, %r8
-; AVX1-NEXT:    vmovq %xmm0, %rcx
-; AVX1-NEXT:    vpextrq $1, %xmm1, %rdx
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rsi
-; AVX1-NEXT:    xorl %eax, %eax
-; AVX1-NEXT:    imulq %rdx, %rsi
-; AVX1-NEXT:    movq $-1, %r9
-; AVX1-NEXT:    movl $0, %edx
-; AVX1-NEXT:    cmovoq %r9, %rdx
-; AVX1-NEXT:    vmovq %rsi, %xmm0
-; AVX1-NEXT:    imulq %r8, %rcx
-; AVX1-NEXT:    vmovq %rcx, %xmm1
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; AVX1-NEXT:    vmovq %rdx, %xmm0
-; AVX1-NEXT:    cmovoq %r9, %rax
-; AVX1-NEXT:    vmovq %rax, %xmm2
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: smulo_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovq %xmm1, %r8
-; AVX2-NEXT:    vmovq %xmm0, %rcx
-; AVX2-NEXT:    vpextrq $1, %xmm1, %rdx
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rsi
-; AVX2-NEXT:    xorl %eax, %eax
-; AVX2-NEXT:    imulq %rdx, %rsi
-; AVX2-NEXT:    movq $-1, %r9
-; AVX2-NEXT:    movl $0, %edx
-; AVX2-NEXT:    cmovoq %r9, %rdx
-; AVX2-NEXT:    vmovq %rsi, %xmm0
-; AVX2-NEXT:    imulq %r8, %rcx
-; AVX2-NEXT:    vmovq %rcx, %xmm1
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; AVX2-NEXT:    vmovq %rdx, %xmm0
-; AVX2-NEXT:    cmovoq %r9, %rax
-; AVX2-NEXT:    vmovq %rax, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: smulo_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovq %xmm1, %r8
+; AVX-NEXT:    vmovq %xmm0, %rcx
+; AVX-NEXT:    vpextrq $1, %xmm1, %rdx
+; AVX-NEXT:    vpextrq $1, %xmm0, %rsi
+; AVX-NEXT:    xorl %eax, %eax
+; AVX-NEXT:    imulq %rdx, %rsi
+; AVX-NEXT:    movq $-1, %r9
+; AVX-NEXT:    movl $0, %edx
+; AVX-NEXT:    cmovoq %r9, %rdx
+; AVX-NEXT:    vmovq %rsi, %xmm0
+; AVX-NEXT:    imulq %r8, %rcx
+; AVX-NEXT:    vmovq %rcx, %xmm1
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX-NEXT:    vmovq %rdx, %xmm0
+; AVX-NEXT:    cmovoq %r9, %rax
+; AVX-NEXT:    vmovq %rax, %xmm2
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: smulo_v2i64:
 ; AVX512:       # %bb.0:
@@ -3711,7 +3679,7 @@ define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE2-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
 ; SSE2-NEXT:    movq %rax, %rdx
 ; SSE2-NEXT:    movq %r9, %rcx
-; SSE2-NEXT:    callq __muloti4
+; SSE2-NEXT:    callq __muloti4@PLT
 ; SSE2-NEXT:    movq %rax, %r13
 ; SSE2-NEXT:    movq %rdx, %rbp
 ; SSE2-NEXT:    movq $0, {{[0-9]+}}(%rsp)
@@ -3720,7 +3688,7 @@ define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE2-NEXT:    movq %r14, %rsi
 ; SSE2-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
 ; SSE2-NEXT:    movq %r12, %rcx
-; SSE2-NEXT:    callq __muloti4
+; SSE2-NEXT:    callq __muloti4@PLT
 ; SSE2-NEXT:    xorl %ecx, %ecx
 ; SSE2-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
 ; SSE2-NEXT:    sbbl %esi, %esi
@@ -3760,7 +3728,7 @@ define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSSE3-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
 ; SSSE3-NEXT:    movq %rax, %rdx
 ; SSSE3-NEXT:    movq %r9, %rcx
-; SSSE3-NEXT:    callq __muloti4
+; SSSE3-NEXT:    callq __muloti4@PLT
 ; SSSE3-NEXT:    movq %rax, %r13
 ; SSSE3-NEXT:    movq %rdx, %rbp
 ; SSSE3-NEXT:    movq $0, {{[0-9]+}}(%rsp)
@@ -3769,7 +3737,7 @@ define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSSE3-NEXT:    movq %r14, %rsi
 ; SSSE3-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
 ; SSSE3-NEXT:    movq %r12, %rcx
-; SSSE3-NEXT:    callq __muloti4
+; SSSE3-NEXT:    callq __muloti4@PLT
 ; SSSE3-NEXT:    xorl %ecx, %ecx
 ; SSSE3-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
 ; SSSE3-NEXT:    sbbl %esi, %esi
@@ -3809,7 +3777,7 @@ define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE41-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
 ; SSE41-NEXT:    movq %rax, %rdx
 ; SSE41-NEXT:    movq %r9, %rcx
-; SSE41-NEXT:    callq __muloti4
+; SSE41-NEXT:    callq __muloti4@PLT
 ; SSE41-NEXT:    movq %rax, %r13
 ; SSE41-NEXT:    movq %rdx, %rbp
 ; SSE41-NEXT:    movq $0, {{[0-9]+}}(%rsp)
@@ -3818,7 +3786,7 @@ define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE41-NEXT:    movq %r14, %rsi
 ; SSE41-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
 ; SSE41-NEXT:    movq %r12, %rcx
-; SSE41-NEXT:    callq __muloti4
+; SSE41-NEXT:    callq __muloti4@PLT
 ; SSE41-NEXT:    xorl %ecx, %ecx
 ; SSE41-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
 ; SSE41-NEXT:    sbbl %esi, %esi
@@ -3839,101 +3807,53 @@ define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE41-NEXT:    popq %rbp
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: smulo_v2i128:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    pushq %rbp
-; AVX1-NEXT:    pushq %r15
-; AVX1-NEXT:    pushq %r14
-; AVX1-NEXT:    pushq %r13
-; AVX1-NEXT:    pushq %r12
-; AVX1-NEXT:    pushq %rbx
-; AVX1-NEXT:    subq $24, %rsp
-; AVX1-NEXT:    movq %r8, %rax
-; AVX1-NEXT:    movq %rcx, %r14
-; AVX1-NEXT:    movq %rdx, %rbx
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r15
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r12
-; AVX1-NEXT:    movq $0, {{[0-9]+}}(%rsp)
-; AVX1-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
-; AVX1-NEXT:    movq %rax, %rdx
-; AVX1-NEXT:    movq %r9, %rcx
-; AVX1-NEXT:    callq __muloti4
-; AVX1-NEXT:    movq %rax, %r13
-; AVX1-NEXT:    movq %rdx, %rbp
-; AVX1-NEXT:    movq $0, {{[0-9]+}}(%rsp)
-; AVX1-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
-; AVX1-NEXT:    movq %rbx, %rdi
-; AVX1-NEXT:    movq %r14, %rsi
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; AVX1-NEXT:    movq %r12, %rcx
-; AVX1-NEXT:    callq __muloti4
-; AVX1-NEXT:    xorl %ecx, %ecx
-; AVX1-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
-; AVX1-NEXT:    sbbl %esi, %esi
-; AVX1-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
-; AVX1-NEXT:    sbbl %ecx, %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm0
-; AVX1-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rdx, 24(%r15)
-; AVX1-NEXT:    movq %rax, 16(%r15)
-; AVX1-NEXT:    movq %rbp, 8(%r15)
-; AVX1-NEXT:    movq %r13, (%r15)
-; AVX1-NEXT:    addq $24, %rsp
-; AVX1-NEXT:    popq %rbx
-; AVX1-NEXT:    popq %r12
-; AVX1-NEXT:    popq %r13
-; AVX1-NEXT:    popq %r14
-; AVX1-NEXT:    popq %r15
-; AVX1-NEXT:    popq %rbp
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: smulo_v2i128:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    pushq %rbp
-; AVX2-NEXT:    pushq %r15
-; AVX2-NEXT:    pushq %r14
-; AVX2-NEXT:    pushq %r13
-; AVX2-NEXT:    pushq %r12
-; AVX2-NEXT:    pushq %rbx
-; AVX2-NEXT:    subq $24, %rsp
-; AVX2-NEXT:    movq %r8, %rax
-; AVX2-NEXT:    movq %rcx, %r14
-; AVX2-NEXT:    movq %rdx, %rbx
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r15
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r12
-; AVX2-NEXT:    movq $0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
-; AVX2-NEXT:    movq %rax, %rdx
-; AVX2-NEXT:    movq %r9, %rcx
-; AVX2-NEXT:    callq __muloti4
-; AVX2-NEXT:    movq %rax, %r13
-; AVX2-NEXT:    movq %rdx, %rbp
-; AVX2-NEXT:    movq $0, {{[0-9]+}}(%rsp)
-; AVX2-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
-; AVX2-NEXT:    movq %rbx, %rdi
-; AVX2-NEXT:    movq %r14, %rsi
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; AVX2-NEXT:    movq %r12, %rcx
-; AVX2-NEXT:    callq __muloti4
-; AVX2-NEXT:    xorl %ecx, %ecx
-; AVX2-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
-; AVX2-NEXT:    sbbl %esi, %esi
-; AVX2-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
-; AVX2-NEXT:    sbbl %ecx, %ecx
-; AVX2-NEXT:    vmovd %ecx, %xmm0
-; AVX2-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rdx, 24(%r15)
-; AVX2-NEXT:    movq %rax, 16(%r15)
-; AVX2-NEXT:    movq %rbp, 8(%r15)
-; AVX2-NEXT:    movq %r13, (%r15)
-; AVX2-NEXT:    addq $24, %rsp
-; AVX2-NEXT:    popq %rbx
-; AVX2-NEXT:    popq %r12
-; AVX2-NEXT:    popq %r13
-; AVX2-NEXT:    popq %r14
-; AVX2-NEXT:    popq %r15
-; AVX2-NEXT:    popq %rbp
-; AVX2-NEXT:    retq
+; AVX-LABEL: smulo_v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %r13
+; AVX-NEXT:    pushq %r12
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    subq $24, %rsp
+; AVX-NEXT:    movq %r8, %rax
+; AVX-NEXT:    movq %rcx, %r14
+; AVX-NEXT:    movq %rdx, %rbx
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r15
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r12
+; AVX-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; AVX-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
+; AVX-NEXT:    movq %rax, %rdx
+; AVX-NEXT:    movq %r9, %rcx
+; AVX-NEXT:    callq __muloti4@PLT
+; AVX-NEXT:    movq %rax, %r13
+; AVX-NEXT:    movq %rdx, %rbp
+; AVX-NEXT:    movq $0, {{[0-9]+}}(%rsp)
+; AVX-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
+; AVX-NEXT:    movq %rbx, %rdi
+; AVX-NEXT:    movq %r14, %rsi
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    movq %r12, %rcx
+; AVX-NEXT:    callq __muloti4@PLT
+; AVX-NEXT:    xorl %ecx, %ecx
+; AVX-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    sbbl %esi, %esi
+; AVX-NEXT:    cmpq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    sbbl %ecx, %ecx
+; AVX-NEXT:    vmovd %ecx, %xmm0
+; AVX-NEXT:    vpinsrd $1, %esi, %xmm0, %xmm0
+; AVX-NEXT:    movq %rdx, 24(%r15)
+; AVX-NEXT:    movq %rax, 16(%r15)
+; AVX-NEXT:    movq %rbp, 8(%r15)
+; AVX-NEXT:    movq %r13, (%r15)
+; AVX-NEXT:    addq $24, %rsp
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r12
+; AVX-NEXT:    popq %r13
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    popq %rbp
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: smulo_v2i128:
 ; AVX512:       # %bb.0:
@@ -3953,7 +3873,7 @@ define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; AVX512-NEXT:    leaq {{[0-9]+}}(%rsp), %r8
 ; AVX512-NEXT:    movq %rax, %rdx
 ; AVX512-NEXT:    movq %r9, %rcx
-; AVX512-NEXT:    callq __muloti4
+; AVX512-NEXT:    callq __muloti4@PLT
 ; AVX512-NEXT:    movq %rax, %r13
 ; AVX512-NEXT:    movq %rdx, %rbp
 ; AVX512-NEXT:    movq $0, {{[0-9]+}}(%rsp)
@@ -3962,7 +3882,7 @@ define <2 x i32> @smulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; AVX512-NEXT:    movq %r14, %rsi
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
 ; AVX512-NEXT:    movq %r12, %rcx
-; AVX512-NEXT:    callq __muloti4
+; AVX512-NEXT:    callq __muloti4@PLT
 ; AVX512-NEXT:    cmpq $0, {{[0-9]+}}(%rsp)
 ; AVX512-NEXT:    setne %cl
 ; AVX512-NEXT:    kmovd %ecx, %k0

diff --git a/llvm/test/CodeGen/X86/vec_ssubo.ll b/llvm/test/CodeGen/X86/vec_ssubo.ll
index 397b11cb5625..3e6b1355f4a6 100644
--- a/llvm/test/CodeGen/X86/vec_ssubo.ll
+++ b/llvm/test/CodeGen/X86/vec_ssubo.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX512
 
 declare {<1 x i32>, <1 x i1>} @llvm.ssub.with.overflow.v1i32(<1 x i32>, <1 x i32>)
 declare {<2 x i32>, <2 x i1>} @llvm.ssub.with.overflow.v2i32(<2 x i32>, <2 x i32>)
@@ -23,23 +23,14 @@ declare {<4 x i1>, <4 x i1>} @llvm.ssub.with.overflow.v4i1(<4 x i1>, <4 x i1>)
 declare {<2 x i128>, <2 x i1>} @llvm.ssub.with.overflow.v2i128(<2 x i128>, <2 x i128>)
 
 define <1 x i32> @ssubo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
-; SSE-LABEL: ssubo_v1i32:
-; SSE:       # %bb.0:
-; SSE-NEXT:    xorl %eax, %eax
-; SSE-NEXT:    subl %esi, %edi
-; SSE-NEXT:    seto %al
-; SSE-NEXT:    negl %eax
-; SSE-NEXT:    movl %edi, (%rdx)
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: ssubo_v1i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    subl %esi, %edi
-; AVX-NEXT:    seto %al
-; AVX-NEXT:    negl %eax
-; AVX-NEXT:    movl %edi, (%rdx)
-; AVX-NEXT:    retq
+; CHECK-LABEL: ssubo_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    subl %esi, %edi
+; CHECK-NEXT:    seto %al
+; CHECK-NEXT:    negl %eax
+; CHECK-NEXT:    movl %edi, (%rdx)
+; CHECK-NEXT:    retq
   %t = call {<1 x i32>, <1 x i1>} @llvm.ssub.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1)
   %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
   %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
@@ -60,25 +51,15 @@ define <2 x i32> @ssubo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun
 ; SSE-NEXT:    movq %xmm3, (%rdi)
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: ssubo_v2i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vmovq %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: ssubo_v2i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
-; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vmovq %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: ssubo_v2i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmovq %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: ssubo_v2i32:
 ; AVX512:       # %bb.0:
@@ -138,27 +119,16 @@ define <3 x i32> @ssubo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; SSE41-NEXT:    movq %xmm3, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: ssubo_v3i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
-; AVX1-NEXT:    vmovq %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: ssubo_v3i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
-; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
-; AVX2-NEXT:    vmovq %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: ssubo_v3i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
+; AVX-NEXT:    vmovq %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: ssubo_v3i32:
 ; AVX512:       # %bb.0:
@@ -192,25 +162,15 @@ define <4 x i32> @ssubo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) noun
 ; SSE-NEXT:    movdqa %xmm3, (%rdi)
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: ssubo_v4i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: ssubo_v4i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
-; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: ssubo_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm2
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: ssubo_v4i32:
 ; AVX512:       # %bb.0:
@@ -836,27 +796,16 @@ define <2 x i32> @ssubo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
 ; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: ssubo_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm2
-; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: ssubo_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm2
-; AVX2-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpxor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: ssubo_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtq %xmm2, %xmm1, %xmm2
+; AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: ssubo_v2i64:
 ; AVX512:       # %bb.0:
@@ -980,65 +929,35 @@ define <4 x i32> @ssubo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
 ; SSE41-NEXT:    movb %sil, 2(%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: ssubo_v4i24:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpslld $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $8, %xmm1, %xmm1
-; AVX1-NEXT:    vpslld $8, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $8, %xmm0, %xmm0
-; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpslld $8, %xmm1, %xmm0
-; AVX1-NEXT:    vpsrad $8, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX1-NEXT:    movw %ax, 9(%rdi)
-; AVX1-NEXT:    vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT:    movw %cx, 6(%rdi)
-; AVX1-NEXT:    vpextrd $1, %xmm1, %edx
-; AVX1-NEXT:    movw %dx, 3(%rdi)
-; AVX1-NEXT:    vmovd %xmm1, %esi
-; AVX1-NEXT:    movw %si, (%rdi)
-; AVX1-NEXT:    shrl $16, %eax
-; AVX1-NEXT:    movb %al, 11(%rdi)
-; AVX1-NEXT:    shrl $16, %ecx
-; AVX1-NEXT:    movb %cl, 8(%rdi)
-; AVX1-NEXT:    shrl $16, %edx
-; AVX1-NEXT:    movb %dl, 5(%rdi)
-; AVX1-NEXT:    shrl $16, %esi
-; AVX1-NEXT:    movb %sil, 2(%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: ssubo_v4i24:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpslld $8, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrad $8, %xmm1, %xmm1
-; AVX2-NEXT:    vpslld $8, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrad $8, %xmm0, %xmm0
-; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpslld $8, %xmm1, %xmm0
-; AVX2-NEXT:    vpsrad $8, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpextrd $3, %xmm1, %eax
-; AVX2-NEXT:    movw %ax, 9(%rdi)
-; AVX2-NEXT:    vpextrd $2, %xmm1, %ecx
-; AVX2-NEXT:    movw %cx, 6(%rdi)
-; AVX2-NEXT:    vpextrd $1, %xmm1, %edx
-; AVX2-NEXT:    movw %dx, 3(%rdi)
-; AVX2-NEXT:    vmovd %xmm1, %esi
-; AVX2-NEXT:    movw %si, (%rdi)
-; AVX2-NEXT:    shrl $16, %eax
-; AVX2-NEXT:    movb %al, 11(%rdi)
-; AVX2-NEXT:    shrl $16, %ecx
-; AVX2-NEXT:    movb %cl, 8(%rdi)
-; AVX2-NEXT:    shrl $16, %edx
-; AVX2-NEXT:    movb %dl, 5(%rdi)
-; AVX2-NEXT:    shrl $16, %esi
-; AVX2-NEXT:    movb %sil, 2(%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: ssubo_v4i24:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslld $8, %xmm1, %xmm1
+; AVX-NEXT:    vpsrad $8, %xmm1, %xmm1
+; AVX-NEXT:    vpslld $8, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $8, %xmm0, %xmm0
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpslld $8, %xmm1, %xmm0
+; AVX-NEXT:    vpsrad $8, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpextrd $3, %xmm1, %eax
+; AVX-NEXT:    movw %ax, 9(%rdi)
+; AVX-NEXT:    vpextrd $2, %xmm1, %ecx
+; AVX-NEXT:    movw %cx, 6(%rdi)
+; AVX-NEXT:    vpextrd $1, %xmm1, %edx
+; AVX-NEXT:    movw %dx, 3(%rdi)
+; AVX-NEXT:    vmovd %xmm1, %esi
+; AVX-NEXT:    movw %si, (%rdi)
+; AVX-NEXT:    shrl $16, %eax
+; AVX-NEXT:    movb %al, 11(%rdi)
+; AVX-NEXT:    shrl $16, %ecx
+; AVX-NEXT:    movb %cl, 8(%rdi)
+; AVX-NEXT:    shrl $16, %edx
+; AVX-NEXT:    movb %dl, 5(%rdi)
+; AVX-NEXT:    shrl $16, %esi
+; AVX-NEXT:    movb %sil, 2(%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: ssubo_v4i24:
 ; AVX512:       # %bb.0:
@@ -1095,37 +1014,21 @@ define <4 x i32> @ssubo_v4i1(<4 x i1> %a0, <4 x i1> %a1, <4 x i1>* %p2) nounwind
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: ssubo_v4i1:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm1
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vpslld $31, %xmm0, %xmm1
-; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm2
-; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovmskps %xmm1, %eax
-; AVX1-NEXT:    movb %al, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: ssubo_v4i1:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpslld $31, %xmm1, %xmm1
-; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm1
-; AVX2-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX2-NEXT:    vpsrad $31, %xmm0, %xmm0
-; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT:    vpslld $31, %xmm0, %xmm1
-; AVX2-NEXT:    vpsrad $31, %xmm1, %xmm2
-; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vmovmskps %xmm1, %eax
-; AVX2-NEXT:    movb %al, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: ssubo_v4i1:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpslld $31, %xmm1, %xmm1
+; AVX-NEXT:    vpsrad $31, %xmm1, %xmm1
+; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpslld $31, %xmm0, %xmm1
+; AVX-NEXT:    vpsrad $31, %xmm1, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vmovmskps %xmm1, %eax
+; AVX-NEXT:    movb %al, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: ssubo_v4i1:
 ; AVX512:       # %bb.0:
@@ -1217,47 +1120,26 @@ define <2 x i32> @ssubo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE41-NEXT:    movq %rsi, 8(%r10)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: ssubo_v2i128:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX1-NEXT:    subq %r8, %rdi
-; AVX1-NEXT:    sbbq %r9, %rsi
-; AVX1-NEXT:    seto %r8b
-; AVX1-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
-; AVX1-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
-; AVX1-NEXT:    seto %al
-; AVX1-NEXT:    movzbl %al, %r9d
-; AVX1-NEXT:    negl %r9d
-; AVX1-NEXT:    movzbl %r8b, %eax
-; AVX1-NEXT:    negl %eax
-; AVX1-NEXT:    vmovd %eax, %xmm0
-; AVX1-NEXT:    vpinsrd $1, %r9d, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rdx, 16(%r10)
-; AVX1-NEXT:    movq %rdi, (%r10)
-; AVX1-NEXT:    movq %rcx, 24(%r10)
-; AVX1-NEXT:    movq %rsi, 8(%r10)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: ssubo_v2i128:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX2-NEXT:    subq %r8, %rdi
-; AVX2-NEXT:    sbbq %r9, %rsi
-; AVX2-NEXT:    seto %r8b
-; AVX2-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
-; AVX2-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
-; AVX2-NEXT:    seto %al
-; AVX2-NEXT:    movzbl %al, %r9d
-; AVX2-NEXT:    negl %r9d
-; AVX2-NEXT:    movzbl %r8b, %eax
-; AVX2-NEXT:    negl %eax
-; AVX2-NEXT:    vmovd %eax, %xmm0
-; AVX2-NEXT:    vpinsrd $1, %r9d, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rdx, 16(%r10)
-; AVX2-NEXT:    movq %rdi, (%r10)
-; AVX2-NEXT:    movq %rcx, 24(%r10)
-; AVX2-NEXT:    movq %rsi, 8(%r10)
-; AVX2-NEXT:    retq
+; AVX-LABEL: ssubo_v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX-NEXT:    subq %r8, %rdi
+; AVX-NEXT:    sbbq %r9, %rsi
+; AVX-NEXT:    seto %r8b
+; AVX-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    seto %al
+; AVX-NEXT:    movzbl %al, %r9d
+; AVX-NEXT:    negl %r9d
+; AVX-NEXT:    movzbl %r8b, %eax
+; AVX-NEXT:    negl %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpinsrd $1, %r9d, %xmm0, %xmm0
+; AVX-NEXT:    movq %rdx, 16(%r10)
+; AVX-NEXT:    movq %rdi, (%r10)
+; AVX-NEXT:    movq %rcx, 24(%r10)
+; AVX-NEXT:    movq %rsi, 8(%r10)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: ssubo_v2i128:
 ; AVX512:       # %bb.0:

diff --git a/llvm/test/CodeGen/X86/vec_uaddo.ll b/llvm/test/CodeGen/X86/vec_uaddo.ll
index 5954f1195741..83db253d122d 100644
--- a/llvm/test/CodeGen/X86/vec_uaddo.ll
+++ b/llvm/test/CodeGen/X86/vec_uaddo.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX512
 
 declare {<1 x i32>, <1 x i1>} @llvm.uadd.with.overflow.v1i32(<1 x i32>, <1 x i32>)
 declare {<2 x i32>, <2 x i1>} @llvm.uadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)
@@ -23,19 +23,12 @@ declare {<4 x i1>, <4 x i1>} @llvm.uadd.with.overflow.v4i1(<4 x i1>, <4 x i1>)
 declare {<2 x i128>, <2 x i1>} @llvm.uadd.with.overflow.v2i128(<2 x i128>, <2 x i128>)
 
 define <1 x i32> @uaddo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
-; SSE-LABEL: uaddo_v1i32:
-; SSE:       # %bb.0:
-; SSE-NEXT:    addl %esi, %edi
-; SSE-NEXT:    sbbl %eax, %eax
-; SSE-NEXT:    movl %edi, (%rdx)
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: uaddo_v1i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    addl %esi, %edi
-; AVX-NEXT:    sbbl %eax, %eax
-; AVX-NEXT:    movl %edi, (%rdx)
-; AVX-NEXT:    retq
+; CHECK-LABEL: uaddo_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addl %esi, %edi
+; CHECK-NEXT:    sbbl %eax, %eax
+; CHECK-NEXT:    movl %edi, (%rdx)
+; CHECK-NEXT:    retq
   %t = call {<1 x i32>, <1 x i1>} @llvm.uadd.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1)
   %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
   %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
@@ -75,25 +68,15 @@ define <2 x i32> @uaddo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun
 ; SSE41-NEXT:    movq %xmm1, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: uaddo_v2i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmaxud %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovq %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: uaddo_v2i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpmaxud %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vmovq %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: uaddo_v2i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpmaxud %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vmovq %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: uaddo_v2i32:
 ; AVX512:       # %bb.0:
@@ -147,27 +130,16 @@ define <3 x i32> @uaddo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; SSE41-NEXT:    movq %xmm1, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: uaddo_v3i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmaxud %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
-; AVX1-NEXT:    vmovq %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: uaddo_v3i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpmaxud %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
-; AVX2-NEXT:    vmovq %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: uaddo_v3i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpmaxud %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
+; AVX-NEXT:    vmovq %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: uaddo_v3i32:
 ; AVX512:       # %bb.0:
@@ -217,25 +189,15 @@ define <4 x i32> @uaddo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) noun
 ; SSE41-NEXT:    movdqa %xmm1, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: uaddo_v4i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpmaxud %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: uaddo_v4i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpmaxud %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: uaddo_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpmaxud %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: uaddo_v4i32:
 ; AVX512:       # %bb.0:
@@ -901,27 +863,16 @@ define <2 x i32> @uaddo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
 ; SSE-NEXT:    movdqa %xmm1, (%rdi)
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: uaddo_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: uaddo_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm3
-; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm0
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: uaddo_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: uaddo_v2i64:
 ; AVX512:       # %bb.0:
@@ -1247,39 +1198,22 @@ define <2 x i32> @uaddo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE41-NEXT:    movq %rsi, 8(%r10)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: uaddo_v2i128:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX1-NEXT:    addq {{[0-9]+}}(%rsp), %rdx
-; AVX1-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
-; AVX1-NEXT:    sbbl %r11d, %r11d
-; AVX1-NEXT:    addq %r8, %rdi
-; AVX1-NEXT:    adcq %r9, %rsi
-; AVX1-NEXT:    sbbl %eax, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm0
-; AVX1-NEXT:    vpinsrd $1, %r11d, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rdx, 16(%r10)
-; AVX1-NEXT:    movq %rdi, (%r10)
-; AVX1-NEXT:    movq %rcx, 24(%r10)
-; AVX1-NEXT:    movq %rsi, 8(%r10)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: uaddo_v2i128:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX2-NEXT:    addq {{[0-9]+}}(%rsp), %rdx
-; AVX2-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
-; AVX2-NEXT:    sbbl %r11d, %r11d
-; AVX2-NEXT:    addq %r8, %rdi
-; AVX2-NEXT:    adcq %r9, %rsi
-; AVX2-NEXT:    sbbl %eax, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm0
-; AVX2-NEXT:    vpinsrd $1, %r11d, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rdx, 16(%r10)
-; AVX2-NEXT:    movq %rdi, (%r10)
-; AVX2-NEXT:    movq %rcx, 24(%r10)
-; AVX2-NEXT:    movq %rsi, 8(%r10)
-; AVX2-NEXT:    retq
+; AVX-LABEL: uaddo_v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX-NEXT:    addq {{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    adcq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    sbbl %r11d, %r11d
+; AVX-NEXT:    addq %r8, %rdi
+; AVX-NEXT:    adcq %r9, %rsi
+; AVX-NEXT:    sbbl %eax, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpinsrd $1, %r11d, %xmm0, %xmm0
+; AVX-NEXT:    movq %rdx, 16(%r10)
+; AVX-NEXT:    movq %rdi, (%r10)
+; AVX-NEXT:    movq %rcx, 24(%r10)
+; AVX-NEXT:    movq %rsi, 8(%r10)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: uaddo_v2i128:
 ; AVX512:       # %bb.0:

diff --git a/llvm/test/CodeGen/X86/vec_umulo.ll b/llvm/test/CodeGen/X86/vec_umulo.ll
index 4766fe90a3d4..ef028899836e 100644
--- a/llvm/test/CodeGen/X86/vec_umulo.ll
+++ b/llvm/test/CodeGen/X86/vec_umulo.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX512
 
 declare {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32>, <1 x i32>)
 declare {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32>, <2 x i32>)
@@ -25,29 +25,17 @@ declare {<4 x i1>, <4 x i1>} @llvm.umul.with.overflow.v4i1(<4 x i1>, <4 x i1>)
 declare {<2 x i128>, <2 x i1>} @llvm.umul.with.overflow.v2i128(<2 x i128>, <2 x i128>)
 
 define <1 x i32> @umulo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
-; SSE-LABEL: umulo_v1i32:
-; SSE:       # %bb.0:
-; SSE-NEXT:    movq %rdx, %rcx
-; SSE-NEXT:    movl %edi, %eax
-; SSE-NEXT:    xorl %edi, %edi
-; SSE-NEXT:    mull %esi
-; SSE-NEXT:    seto %dil
-; SSE-NEXT:    negl %edi
-; SSE-NEXT:    movl %eax, (%rcx)
-; SSE-NEXT:    movl %edi, %eax
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: umulo_v1i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    movq %rdx, %rcx
-; AVX-NEXT:    movl %edi, %eax
-; AVX-NEXT:    xorl %edi, %edi
-; AVX-NEXT:    mull %esi
-; AVX-NEXT:    seto %dil
-; AVX-NEXT:    negl %edi
-; AVX-NEXT:    movl %eax, (%rcx)
-; AVX-NEXT:    movl %edi, %eax
-; AVX-NEXT:    retq
+; CHECK-LABEL: umulo_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdx, %rcx
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    xorl %edi, %edi
+; CHECK-NEXT:    mull %esi
+; CHECK-NEXT:    seto %dil
+; CHECK-NEXT:    negl %edi
+; CHECK-NEXT:    movl %eax, (%rcx)
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    retq
   %t = call {<1 x i32>, <1 x i1>} @llvm.umul.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1)
   %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
   %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
@@ -2720,53 +2708,29 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
 ; SSE41-NEXT:    movdqa %xmm1, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: umulo_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovq %xmm0, %r10
-; AVX1-NEXT:    vmovq %xmm1, %r8
-; AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX1-NEXT:    vpextrq $1, %xmm1, %rdx
-; AVX1-NEXT:    xorl %esi, %esi
-; AVX1-NEXT:    mulq %rdx
-; AVX1-NEXT:    movq $-1, %r9
-; AVX1-NEXT:    movl $0, %ecx
-; AVX1-NEXT:    cmovoq %r9, %rcx
-; AVX1-NEXT:    vmovq %rax, %xmm0
-; AVX1-NEXT:    movq %r10, %rax
-; AVX1-NEXT:    mulq %r8
-; AVX1-NEXT:    vmovq %rax, %xmm1
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; AVX1-NEXT:    vmovq %rcx, %xmm0
-; AVX1-NEXT:    cmovoq %r9, %rsi
-; AVX1-NEXT:    vmovq %rsi, %xmm2
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: umulo_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovq %xmm0, %r10
-; AVX2-NEXT:    vmovq %xmm1, %r8
-; AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; AVX2-NEXT:    vpextrq $1, %xmm1, %rdx
-; AVX2-NEXT:    xorl %esi, %esi
-; AVX2-NEXT:    mulq %rdx
-; AVX2-NEXT:    movq $-1, %r9
-; AVX2-NEXT:    movl $0, %ecx
-; AVX2-NEXT:    cmovoq %r9, %rcx
-; AVX2-NEXT:    vmovq %rax, %xmm0
-; AVX2-NEXT:    movq %r10, %rax
-; AVX2-NEXT:    mulq %r8
-; AVX2-NEXT:    vmovq %rax, %xmm1
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; AVX2-NEXT:    vmovq %rcx, %xmm0
-; AVX2-NEXT:    cmovoq %r9, %rsi
-; AVX2-NEXT:    vmovq %rsi, %xmm2
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: umulo_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovq %xmm0, %r10
+; AVX-NEXT:    vmovq %xmm1, %r8
+; AVX-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX-NEXT:    vpextrq $1, %xmm1, %rdx
+; AVX-NEXT:    xorl %esi, %esi
+; AVX-NEXT:    mulq %rdx
+; AVX-NEXT:    movq $-1, %r9
+; AVX-NEXT:    movl $0, %ecx
+; AVX-NEXT:    cmovoq %r9, %rcx
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    movq %r10, %rax
+; AVX-NEXT:    mulq %r8
+; AVX-NEXT:    vmovq %rax, %xmm1
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; AVX-NEXT:    vmovq %rcx, %xmm0
+; AVX-NEXT:    cmovoq %r9, %rsi
+; AVX-NEXT:    vmovq %rsi, %xmm2
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: umulo_v2i64:
 ; AVX512:       # %bb.0:
@@ -3532,157 +3496,81 @@ define <2 x i32> @umulo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE41-NEXT:    popq %rbp
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: umulo_v2i128:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    pushq %rbp
-; AVX1-NEXT:    pushq %r15
-; AVX1-NEXT:    pushq %r14
-; AVX1-NEXT:    pushq %r13
-; AVX1-NEXT:    pushq %r12
-; AVX1-NEXT:    pushq %rbx
-; AVX1-NEXT:    movq %r9, %r10
-; AVX1-NEXT:    movq %rcx, %r12
-; AVX1-NEXT:    movq %rdx, %r11
-; AVX1-NEXT:    movq %rsi, %rax
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r14
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r15
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r9
-; AVX1-NEXT:    testq %r10, %r10
-; AVX1-NEXT:    setne %cl
-; AVX1-NEXT:    testq %rsi, %rsi
-; AVX1-NEXT:    setne %r13b
-; AVX1-NEXT:    andb %cl, %r13b
-; AVX1-NEXT:    mulq %r8
-; AVX1-NEXT:    movq %rax, %rsi
-; AVX1-NEXT:    seto %bpl
-; AVX1-NEXT:    movq %r10, %rax
-; AVX1-NEXT:    mulq %rdi
-; AVX1-NEXT:    movq %rax, %rcx
-; AVX1-NEXT:    seto %bl
-; AVX1-NEXT:    orb %bpl, %bl
-; AVX1-NEXT:    addq %rsi, %rcx
-; AVX1-NEXT:    movq %rdi, %rax
-; AVX1-NEXT:    mulq %r8
-; AVX1-NEXT:    movq %rax, %rdi
-; AVX1-NEXT:    movq %rdx, %rsi
-; AVX1-NEXT:    addq %rcx, %rsi
-; AVX1-NEXT:    setb %cl
-; AVX1-NEXT:    orb %bl, %cl
-; AVX1-NEXT:    orb %r13b, %cl
-; AVX1-NEXT:    testq %r9, %r9
-; AVX1-NEXT:    setne %al
-; AVX1-NEXT:    testq %r12, %r12
-; AVX1-NEXT:    setne %r8b
-; AVX1-NEXT:    andb %al, %r8b
-; AVX1-NEXT:    movq %r12, %rax
-; AVX1-NEXT:    mulq %r15
-; AVX1-NEXT:    movq %rax, %rbp
-; AVX1-NEXT:    seto %r10b
-; AVX1-NEXT:    movq %r9, %rax
-; AVX1-NEXT:    mulq %r11
-; AVX1-NEXT:    movq %rax, %rbx
-; AVX1-NEXT:    seto %r9b
-; AVX1-NEXT:    orb %r10b, %r9b
-; AVX1-NEXT:    addq %rbp, %rbx
-; AVX1-NEXT:    movq %r11, %rax
-; AVX1-NEXT:    mulq %r15
-; AVX1-NEXT:    addq %rbx, %rdx
-; AVX1-NEXT:    setb %bl
-; AVX1-NEXT:    orb %r9b, %bl
-; AVX1-NEXT:    orb %r8b, %bl
-; AVX1-NEXT:    movzbl %bl, %ebp
-; AVX1-NEXT:    negl %ebp
-; AVX1-NEXT:    movzbl %cl, %ecx
-; AVX1-NEXT:    negl %ecx
-; AVX1-NEXT:    vmovd %ecx, %xmm0
-; AVX1-NEXT:    vpinsrd $1, %ebp, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rax, 16(%r14)
-; AVX1-NEXT:    movq %rdi, (%r14)
-; AVX1-NEXT:    movq %rdx, 24(%r14)
-; AVX1-NEXT:    movq %rsi, 8(%r14)
-; AVX1-NEXT:    popq %rbx
-; AVX1-NEXT:    popq %r12
-; AVX1-NEXT:    popq %r13
-; AVX1-NEXT:    popq %r14
-; AVX1-NEXT:    popq %r15
-; AVX1-NEXT:    popq %rbp
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: umulo_v2i128:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    pushq %rbp
-; AVX2-NEXT:    pushq %r15
-; AVX2-NEXT:    pushq %r14
-; AVX2-NEXT:    pushq %r13
-; AVX2-NEXT:    pushq %r12
-; AVX2-NEXT:    pushq %rbx
-; AVX2-NEXT:    movq %r9, %r10
-; AVX2-NEXT:    movq %rcx, %r12
-; AVX2-NEXT:    movq %rdx, %r11
-; AVX2-NEXT:    movq %rsi, %rax
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r14
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r15
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r9
-; AVX2-NEXT:    testq %r10, %r10
-; AVX2-NEXT:    setne %cl
-; AVX2-NEXT:    testq %rsi, %rsi
-; AVX2-NEXT:    setne %r13b
-; AVX2-NEXT:    andb %cl, %r13b
-; AVX2-NEXT:    mulq %r8
-; AVX2-NEXT:    movq %rax, %rsi
-; AVX2-NEXT:    seto %bpl
-; AVX2-NEXT:    movq %r10, %rax
-; AVX2-NEXT:    mulq %rdi
-; AVX2-NEXT:    movq %rax, %rcx
-; AVX2-NEXT:    seto %bl
-; AVX2-NEXT:    orb %bpl, %bl
-; AVX2-NEXT:    addq %rsi, %rcx
-; AVX2-NEXT:    movq %rdi, %rax
-; AVX2-NEXT:    mulq %r8
-; AVX2-NEXT:    movq %rax, %rdi
-; AVX2-NEXT:    movq %rdx, %rsi
-; AVX2-NEXT:    addq %rcx, %rsi
-; AVX2-NEXT:    setb %cl
-; AVX2-NEXT:    orb %bl, %cl
-; AVX2-NEXT:    orb %r13b, %cl
-; AVX2-NEXT:    testq %r9, %r9
-; AVX2-NEXT:    setne %al
-; AVX2-NEXT:    testq %r12, %r12
-; AVX2-NEXT:    setne %r8b
-; AVX2-NEXT:    andb %al, %r8b
-; AVX2-NEXT:    movq %r12, %rax
-; AVX2-NEXT:    mulq %r15
-; AVX2-NEXT:    movq %rax, %rbp
-; AVX2-NEXT:    seto %r10b
-; AVX2-NEXT:    movq %r9, %rax
-; AVX2-NEXT:    mulq %r11
-; AVX2-NEXT:    movq %rax, %rbx
-; AVX2-NEXT:    seto %r9b
-; AVX2-NEXT:    orb %r10b, %r9b
-; AVX2-NEXT:    addq %rbp, %rbx
-; AVX2-NEXT:    movq %r11, %rax
-; AVX2-NEXT:    mulq %r15
-; AVX2-NEXT:    addq %rbx, %rdx
-; AVX2-NEXT:    setb %bl
-; AVX2-NEXT:    orb %r9b, %bl
-; AVX2-NEXT:    orb %r8b, %bl
-; AVX2-NEXT:    movzbl %bl, %ebp
-; AVX2-NEXT:    negl %ebp
-; AVX2-NEXT:    movzbl %cl, %ecx
-; AVX2-NEXT:    negl %ecx
-; AVX2-NEXT:    vmovd %ecx, %xmm0
-; AVX2-NEXT:    vpinsrd $1, %ebp, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rax, 16(%r14)
-; AVX2-NEXT:    movq %rdi, (%r14)
-; AVX2-NEXT:    movq %rdx, 24(%r14)
-; AVX2-NEXT:    movq %rsi, 8(%r14)
-; AVX2-NEXT:    popq %rbx
-; AVX2-NEXT:    popq %r12
-; AVX2-NEXT:    popq %r13
-; AVX2-NEXT:    popq %r14
-; AVX2-NEXT:    popq %r15
-; AVX2-NEXT:    popq %rbp
-; AVX2-NEXT:    retq
+; AVX-LABEL: umulo_v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    pushq %rbp
+; AVX-NEXT:    pushq %r15
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %r13
+; AVX-NEXT:    pushq %r12
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movq %r9, %r10
+; AVX-NEXT:    movq %rcx, %r12
+; AVX-NEXT:    movq %rdx, %r11
+; AVX-NEXT:    movq %rsi, %rax
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r14
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r15
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r9
+; AVX-NEXT:    testq %r10, %r10
+; AVX-NEXT:    setne %cl
+; AVX-NEXT:    testq %rsi, %rsi
+; AVX-NEXT:    setne %r13b
+; AVX-NEXT:    andb %cl, %r13b
+; AVX-NEXT:    mulq %r8
+; AVX-NEXT:    movq %rax, %rsi
+; AVX-NEXT:    seto %bpl
+; AVX-NEXT:    movq %r10, %rax
+; AVX-NEXT:    mulq %rdi
+; AVX-NEXT:    movq %rax, %rcx
+; AVX-NEXT:    seto %bl
+; AVX-NEXT:    orb %bpl, %bl
+; AVX-NEXT:    addq %rsi, %rcx
+; AVX-NEXT:    movq %rdi, %rax
+; AVX-NEXT:    mulq %r8
+; AVX-NEXT:    movq %rax, %rdi
+; AVX-NEXT:    movq %rdx, %rsi
+; AVX-NEXT:    addq %rcx, %rsi
+; AVX-NEXT:    setb %cl
+; AVX-NEXT:    orb %bl, %cl
+; AVX-NEXT:    orb %r13b, %cl
+; AVX-NEXT:    testq %r9, %r9
+; AVX-NEXT:    setne %al
+; AVX-NEXT:    testq %r12, %r12
+; AVX-NEXT:    setne %r8b
+; AVX-NEXT:    andb %al, %r8b
+; AVX-NEXT:    movq %r12, %rax
+; AVX-NEXT:    mulq %r15
+; AVX-NEXT:    movq %rax, %rbp
+; AVX-NEXT:    seto %r10b
+; AVX-NEXT:    movq %r9, %rax
+; AVX-NEXT:    mulq %r11
+; AVX-NEXT:    movq %rax, %rbx
+; AVX-NEXT:    seto %r9b
+; AVX-NEXT:    orb %r10b, %r9b
+; AVX-NEXT:    addq %rbp, %rbx
+; AVX-NEXT:    movq %r11, %rax
+; AVX-NEXT:    mulq %r15
+; AVX-NEXT:    addq %rbx, %rdx
+; AVX-NEXT:    setb %bl
+; AVX-NEXT:    orb %r9b, %bl
+; AVX-NEXT:    orb %r8b, %bl
+; AVX-NEXT:    movzbl %bl, %ebp
+; AVX-NEXT:    negl %ebp
+; AVX-NEXT:    movzbl %cl, %ecx
+; AVX-NEXT:    negl %ecx
+; AVX-NEXT:    vmovd %ecx, %xmm0
+; AVX-NEXT:    vpinsrd $1, %ebp, %xmm0, %xmm0
+; AVX-NEXT:    movq %rax, 16(%r14)
+; AVX-NEXT:    movq %rdi, (%r14)
+; AVX-NEXT:    movq %rdx, 24(%r14)
+; AVX-NEXT:    movq %rsi, 8(%r14)
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r12
+; AVX-NEXT:    popq %r13
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    popq %r15
+; AVX-NEXT:    popq %rbp
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: umulo_v2i128:
 ; AVX512:       # %bb.0:

diff  --git a/llvm/test/CodeGen/X86/vec_usubo.ll b/llvm/test/CodeGen/X86/vec_usubo.ll
index afb0f6cce29c..69d66ebcdb69 100644
--- a/llvm/test/CodeGen/X86/vec_usubo.ll
+++ b/llvm/test/CodeGen/X86/vec_usubo.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK,SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX512
 
 declare {<1 x i32>, <1 x i1>} @llvm.usub.with.overflow.v1i32(<1 x i32>, <1 x i32>)
 declare {<2 x i32>, <2 x i1>} @llvm.usub.with.overflow.v2i32(<2 x i32>, <2 x i32>)
@@ -23,19 +23,12 @@ declare {<4 x i1>, <4 x i1>} @llvm.usub.with.overflow.v4i1(<4 x i1>, <4 x i1>)
 declare {<2 x i128>, <2 x i1>} @llvm.usub.with.overflow.v2i128(<2 x i128>, <2 x i128>)
 
 define <1 x i32> @usubo_v1i32(<1 x i32> %a0, <1 x i32> %a1, <1 x i32>* %p2) nounwind {
-; SSE-LABEL: usubo_v1i32:
-; SSE:       # %bb.0:
-; SSE-NEXT:    subl %esi, %edi
-; SSE-NEXT:    sbbl %eax, %eax
-; SSE-NEXT:    movl %edi, (%rdx)
-; SSE-NEXT:    retq
-;
-; AVX-LABEL: usubo_v1i32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    subl %esi, %edi
-; AVX-NEXT:    sbbl %eax, %eax
-; AVX-NEXT:    movl %edi, (%rdx)
-; AVX-NEXT:    retq
+; CHECK-LABEL: usubo_v1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subl %esi, %edi
+; CHECK-NEXT:    sbbl %eax, %eax
+; CHECK-NEXT:    movl %edi, (%rdx)
+; CHECK-NEXT:    retq
   %t = call {<1 x i32>, <1 x i1>} @llvm.usub.with.overflow.v1i32(<1 x i32> %a0, <1 x i32> %a1)
   %val = extractvalue {<1 x i32>, <1 x i1>} %t, 0
   %obit = extractvalue {<1 x i32>, <1 x i1>} %t, 1
@@ -80,25 +73,15 @@ define <2 x i32> @usubo_v2i32(<2 x i32> %a0, <2 x i32> %a1, <2 x i32>* %p2) noun
 ; SSE41-NEXT:    movq %xmm2, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: usubo_v2i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpminud %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovq %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: usubo_v2i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpminud %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vmovq %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: usubo_v2i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpminud %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vmovq %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: usubo_v2i32:
 ; AVX512:       # %bb.0:
@@ -157,27 +140,16 @@ define <3 x i32> @usubo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
 ; SSE41-NEXT:    movq %xmm2, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: usubo_v3i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpminud %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
-; AVX1-NEXT:    vmovq %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: usubo_v3i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpminud %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
-; AVX2-NEXT:    vmovq %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: usubo_v3i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpminud %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpextrd $2, %xmm1, 8(%rdi)
+; AVX-NEXT:    vmovq %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: usubo_v3i32:
 ; AVX512:       # %bb.0:
@@ -232,25 +204,15 @@ define <4 x i32> @usubo_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32>* %p2) noun
 ; SSE41-NEXT:    movdqa %xmm2, (%rdi)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: usubo_v4i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpminud %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: usubo_v4i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpminud %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: usubo_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpminud %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: usubo_v4i32:
 ; AVX512:       # %bb.0:
@@ -948,27 +910,16 @@ define <2 x i32> @usubo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: usubo_v2i64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm3
-; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm0
-; AVX1-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: usubo_v2i64:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX2-NEXT:    vpxor %xmm2, %xmm0, %xmm3
-; AVX2-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
-; AVX2-NEXT:    vpxor %xmm2, %xmm1, %xmm0
-; AVX2-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm0
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT:    vmovdqa %xmm1, (%rdi)
-; AVX2-NEXT:    retq
+; AVX-LABEL: usubo_v2i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm3
+; AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: usubo_v2i64:
 ; AVX512:       # %bb.0:
@@ -1294,39 +1245,22 @@ define <2 x i32> @usubo_v2i128(<2 x i128> %a0, <2 x i128> %a1, <2 x i128>* %p2)
 ; SSE41-NEXT:    movq %rsi, 8(%r10)
 ; SSE41-NEXT:    retq
 ;
-; AVX1-LABEL: usubo_v2i128:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX1-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
-; AVX1-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
-; AVX1-NEXT:    sbbl %r11d, %r11d
-; AVX1-NEXT:    subq %r8, %rdi
-; AVX1-NEXT:    sbbq %r9, %rsi
-; AVX1-NEXT:    sbbl %eax, %eax
-; AVX1-NEXT:    vmovd %eax, %xmm0
-; AVX1-NEXT:    vpinsrd $1, %r11d, %xmm0, %xmm0
-; AVX1-NEXT:    movq %rdx, 16(%r10)
-; AVX1-NEXT:    movq %rdi, (%r10)
-; AVX1-NEXT:    movq %rcx, 24(%r10)
-; AVX1-NEXT:    movq %rsi, 8(%r10)
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: usubo_v2i128:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX2-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
-; AVX2-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
-; AVX2-NEXT:    sbbl %r11d, %r11d
-; AVX2-NEXT:    subq %r8, %rdi
-; AVX2-NEXT:    sbbq %r9, %rsi
-; AVX2-NEXT:    sbbl %eax, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm0
-; AVX2-NEXT:    vpinsrd $1, %r11d, %xmm0, %xmm0
-; AVX2-NEXT:    movq %rdx, 16(%r10)
-; AVX2-NEXT:    movq %rdi, (%r10)
-; AVX2-NEXT:    movq %rcx, 24(%r10)
-; AVX2-NEXT:    movq %rsi, 8(%r10)
-; AVX2-NEXT:    retq
+; AVX-LABEL: usubo_v2i128:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX-NEXT:    subq {{[0-9]+}}(%rsp), %rdx
+; AVX-NEXT:    sbbq {{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT:    sbbl %r11d, %r11d
+; AVX-NEXT:    subq %r8, %rdi
+; AVX-NEXT:    sbbq %r9, %rsi
+; AVX-NEXT:    sbbl %eax, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpinsrd $1, %r11d, %xmm0, %xmm0
+; AVX-NEXT:    movq %rdx, 16(%r10)
+; AVX-NEXT:    movq %rdi, (%r10)
+; AVX-NEXT:    movq %rcx, 24(%r10)
+; AVX-NEXT:    movq %rsi, 8(%r10)
+; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: usubo_v2i128:
 ; AVX512:       # %bb.0:

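As an aside on the mechanics: FileCheck accepts several prefixes per invocation via --check-prefixes, and a directive fires if it uses any of the active prefixes. Adding a common CHECK prefix to every RUN line therefore lets utils/update_llc_test_checks.py emit one shared block when all configurations produce identical code, rather than one near-identical copy per prefix, as seen in the merged blocks above. A minimal hypothetical test sketching the idea (the function and check lines below are illustrative only, not part of this patch):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX

define <4 x i32> @add_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; The label and return are identical in both runs, so they use the
; shared CHECK prefix; only the add instruction differs per subtarget.
; CHECK-LABEL: add_v4i32:
; SSE2:    paddd %xmm1, %xmm0
; AVX:     vpaddd %xmm1, %xmm0, %xmm0
; CHECK:   retq
  %r = add <4 x i32> %a0, %a1
  ret <4 x i32> %r
}

When every run line produces exactly the same body, as in umulo_v1i32 above, the whole function collapses to a single CHECK block.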