[llvm] r352708 - [X86] Add a 32-bit command line to avx512-intrinsics.ll. Move all 64-bit-mode-only intrinsics to avx512-intrinsics-x86_64.ll.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 30 16:49:41 PST 2019


Author: ctopper
Date: Wed Jan 30 16:49:40 2019
New Revision: 352708

URL: http://llvm.org/viewvc/llvm-project?rev=352708&view=rev
Log:
[X86] Add a 32-bit command line to avx512-intrinsics.ll. Move all 64-bit-mode-only intrinsics to avx512-intrinsics-x86_64.ll.

Most of the other intrinsic tests have 32-bit command lines.
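
For reference, the pair of RUN lines added to avx512-intrinsics.ll in the patch below follows the usual convention for multi-target tests: both targets share a CHECK prefix for assertions that match on 32-bit and 64-bit alike, plus target-specific X64/X86 prefixes for codegen that differs (the lines here are copied from the diff, not a new construct):

    ; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64
    ; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86

With this setup, utils/update_llc_test_checks.py emits separate X64-NEXT/X86-NEXT blocks where the two targets diverge (e.g. kmovw from %edi versus a stack load, retq versus retl) and a single shared block with regex forms like ret{{[l|q]}} where they agree, as seen throughout the modified file.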

Added:
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll
Modified:
    llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll

Added: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll?rev=352708&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-x86_64.ll Wed Jan 30 16:49:40 2019
@@ -0,0 +1,249 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck %s
+
+
+define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
+; CHECK-LABEL: test_x86_sse2_cvtsd2si64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtsd2si %xmm0, %rax
+; CHECK-NEXT:    retq
+  %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
+  ret i64 %res
+}
+declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
+
+define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
+; CHECK-LABEL: test_x86_sse2_cvtsi642sd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
+  ret <2 x double> %res
+}
+declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone
+
+define i64 @test_x86_avx512_cvttsd2si64(<2 x double> %a0) {
+; CHECK-LABEL: test_x86_avx512_cvttsd2si64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvttsd2si %xmm0, %rcx
+; CHECK-NEXT:    vcvttsd2si {sae}, %xmm0, %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    retq
+  %res0 = call i64 @llvm.x86.avx512.cvttsd2si64(<2 x double> %a0, i32 4) ;
+  %res1 = call i64 @llvm.x86.avx512.cvttsd2si64(<2 x double> %a0, i32 8) ;
+  %res2 = add i64 %res0, %res1
+  ret i64 %res2
+}
+declare i64 @llvm.x86.avx512.cvttsd2si64(<2 x double>, i32) nounwind readnone
+
+define i64 @test_x86_avx512_cvttsd2usi64(<2 x double> %a0) {
+; CHECK-LABEL: test_x86_avx512_cvttsd2usi64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvttsd2usi %xmm0, %rcx
+; CHECK-NEXT:    vcvttsd2usi {sae}, %xmm0, %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    retq
+  %res0 = call i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double> %a0, i32 4) ;
+  %res1 = call i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double> %a0, i32 8) ;
+  %res2 = add i64 %res0, %res1
+  ret i64 %res2
+}
+declare i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double>, i32) nounwind readnone
+
+define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
+; CHECK-LABEL: test_x86_sse_cvtss2si64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtss2si %xmm0, %rax
+; CHECK-NEXT:    retq
+  %res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
+  ret i64 %res
+}
+declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
+
+
+define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
+; CHECK-LABEL: test_x86_sse_cvtsi642ss:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
+  ret <4 x float> %res
+}
+declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
+
+
+define i64 @test_x86_avx512_cvttss2si64(<4 x float> %a0) {
+; CHECK-LABEL: test_x86_avx512_cvttss2si64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvttss2si %xmm0, %rcx
+; CHECK-NEXT:    vcvttss2si {sae}, %xmm0, %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    retq
+  %res0 = call i64 @llvm.x86.avx512.cvttss2si64(<4 x float> %a0, i32 4) ;
+  %res1 = call i64 @llvm.x86.avx512.cvttss2si64(<4 x float> %a0, i32 8) ;
+  %res2 = add i64 %res0, %res1
+  ret i64 %res2
+}
+declare i64 @llvm.x86.avx512.cvttss2si64(<4 x float>, i32) nounwind readnone
+
+define i32 @test_x86_avx512_cvttss2usi(<4 x float> %a0) {
+; CHECK-LABEL: test_x86_avx512_cvttss2usi:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvttss2usi {sae}, %xmm0, %ecx
+; CHECK-NEXT:    vcvttss2usi %xmm0, %eax
+; CHECK-NEXT:    addl %ecx, %eax
+; CHECK-NEXT:    retq
+  %res0 = call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> %a0, i32 8) ;
+  %res1 = call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> %a0, i32 4) ;
+  %res2 = add i32 %res0, %res1
+  ret i32 %res2
+}
+declare i32 @llvm.x86.avx512.cvttss2usi(<4 x float>, i32) nounwind readnone
+
+define i64 @test_x86_avx512_cvttss2usi64(<4 x float> %a0) {
+; CHECK-LABEL: test_x86_avx512_cvttss2usi64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvttss2usi %xmm0, %rcx
+; CHECK-NEXT:    vcvttss2usi {sae}, %xmm0, %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    retq
+  %res0 = call i64 @llvm.x86.avx512.cvttss2usi64(<4 x float> %a0, i32 4) ;
+  %res1 = call i64 @llvm.x86.avx512.cvttss2usi64(<4 x float> %a0, i32 8) ;
+  %res2 = add i64 %res0, %res1
+  ret i64 %res2
+}
+declare i64 @llvm.x86.avx512.cvttss2usi64(<4 x float>, i32) nounwind readnone
+
+define i64 @test_x86_avx512_cvtsd2usi64(<2 x double> %a0) {
+; CHECK-LABEL: test_x86_avx512_cvtsd2usi64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtsd2usi %xmm0, %rax
+; CHECK-NEXT:    vcvtsd2usi {rz-sae}, %xmm0, %rcx
+; CHECK-NEXT:    addq %rax, %rcx
+; CHECK-NEXT:    vcvtsd2usi {rd-sae}, %xmm0, %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    retq
+
+  %res = call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %a0, i32 4)
+  %res1 = call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %a0, i32 3)
+  %res2 = call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %a0, i32 1)
+  %res3 = add i64 %res, %res1
+  %res4 = add i64 %res3, %res2
+  ret i64 %res4
+}
+declare i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double>, i32) nounwind readnone
+
+define i64 @test_x86_avx512_cvtsd2si64(<2 x double> %a0) {
+; CHECK-LABEL: test_x86_avx512_cvtsd2si64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtsd2si %xmm0, %rax
+; CHECK-NEXT:    vcvtsd2si {rz-sae}, %xmm0, %rcx
+; CHECK-NEXT:    addq %rax, %rcx
+; CHECK-NEXT:    vcvtsd2si {rd-sae}, %xmm0, %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    retq
+
+  %res = call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %a0, i32 4)
+  %res1 = call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %a0, i32 3)
+  %res2 = call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %a0, i32 1)
+  %res3 = add i64 %res, %res1
+  %res4 = add i64 %res3, %res2
+  ret i64 %res4
+}
+declare i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double>, i32) nounwind readnone
+
+define i64 @test_x86_avx512_cvtss2usi64(<4 x float> %a0) {
+; CHECK-LABEL: test_x86_avx512_cvtss2usi64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtss2usi %xmm0, %rax
+; CHECK-NEXT:    vcvtss2usi {rz-sae}, %xmm0, %rcx
+; CHECK-NEXT:    addq %rax, %rcx
+; CHECK-NEXT:    vcvtss2usi {rd-sae}, %xmm0, %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    retq
+
+  %res = call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %a0, i32 4)
+  %res1 = call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %a0, i32 3)
+  %res2 = call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %a0, i32 1)
+  %res3 = add i64 %res, %res1
+  %res4 = add i64 %res3, %res2
+  ret i64 %res4
+}
+declare i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float>, i32) nounwind readnone
+
+define i64 @test_x86_avx512_cvtss2si64(<4 x float> %a0) {
+; CHECK-LABEL: test_x86_avx512_cvtss2si64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtss2si %xmm0, %rax
+; CHECK-NEXT:    vcvtss2si {rz-sae}, %xmm0, %rcx
+; CHECK-NEXT:    addq %rax, %rcx
+; CHECK-NEXT:    vcvtss2si {rd-sae}, %xmm0, %rax
+; CHECK-NEXT:    addq %rcx, %rax
+; CHECK-NEXT:    retq
+
+  %res = call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %a0, i32 4)
+  %res1 = call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %a0, i32 3)
+  %res2 = call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %a0, i32 1)
+  %res3 = add i64 %res, %res1
+  %res4 = add i64 %res3, %res2
+  ret i64 %res4
+}
+declare i64 @llvm.x86.avx512.vcvtss2si64(<4 x float>, i32) nounwind readnone
+
+define <2 x double> @test_x86_avx512_cvtsi2sd64(<2 x double> %a, i64 %b) {
+; CHECK-LABEL: test_x86_avx512_cvtsi2sd64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtsi2sdq %rdi, {rz-sae}, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double> %a, i64 %b, i32 3) ; <<<2 x double>> [#uses=1]
+  ret <2 x double> %res
+}
+declare <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double>, i64, i32) nounwind readnone
+
+define <4 x float> @test_x86_avx512_cvtsi2ss64(<4 x float> %a, i64 %b) {
+; CHECK-LABEL: test_x86_avx512_cvtsi2ss64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtsi2ssq %rdi, {rz-sae}, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float> %a, i64 %b, i32 3) ; <<<4 x float>> [#uses=1]
+  ret <4 x float> %res
+}
+declare <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float>, i64, i32) nounwind readnone
+
+define <4 x float> @_mm_cvt_roundu64_ss (<4 x float> %a, i64 %b) {
+; CHECK-LABEL: _mm_cvt_roundu64_ss:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtusi2ssq %rdi, {rd-sae}, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float> %a, i64 %b, i32 1) ; <<<4 x float>> [#uses=1]
+  ret <4 x float> %res
+}
+
+define <4 x float> @_mm_cvtu64_ss(<4 x float> %a, i64 %b) {
+; CHECK-LABEL: _mm_cvtu64_ss:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtusi2ssq %rdi, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float> %a, i64 %b, i32 4) ; <<<4 x float>> [#uses=1]
+  ret <4 x float> %res
+}
+declare <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float>, i64, i32) nounwind readnone
+
+define <2 x double> @test_x86_avx512_mm_cvtu64_sd(<2 x double> %a, i64 %b) {
+; CHECK-LABEL: test_x86_avx512_mm_cvtu64_sd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtusi2sdq %rdi, {rd-sae}, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a, i64 %b, i32 1) ; <<<2 x double>> [#uses=1]
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_x86_avx512__mm_cvt_roundu64_sd(<2 x double> %a, i64 %b) {
+; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu64_sd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vcvtusi2sdq %rdi, %xmm0, %xmm0
+; CHECK-NEXT:    retq
+  %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a, i64 %b, i32 4) ; <<<2 x double>> [#uses=1]
+  ret <2 x double> %res
+}
+declare <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double>, i64, i32) nounwind readnone

Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll?rev=352708&r1=352707&r2=352708&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll Wed Jan 30 16:49:40 2019
@@ -1,25 +1,41 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck %s
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86
 
 
 define <8 x double> @test_mask_compress_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) {
-; CHECK-LABEL: test_mask_compress_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcompresspd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_compress_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcompresspd %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_compress_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcompresspd %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = bitcast i8 %mask to <8 x i1>
   %2 = call <8 x double> @llvm.x86.avx512.mask.compress.v8f64(<8 x double> %data, <8 x double> %passthru, <8 x i1> %1)
   ret <8 x double> %2
 }
 
 define <8 x double> @test_maskz_compress_pd_512(<8 x double> %data, i8 %mask) {
-; CHECK-LABEL: test_maskz_compress_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcompresspd %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_compress_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcompresspd %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_compress_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcompresspd %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = bitcast i8 %mask to <8 x i1>
   %2 = call <8 x double> @llvm.x86.avx512.mask.compress.v8f64(<8 x double> %data, <8 x double> zeroinitializer, <8 x i1> %1)
   ret <8 x double> %2
@@ -27,30 +43,43 @@ define <8 x double> @test_maskz_compress
 
 define <8 x double> @test_compress_pd_512(<8 x double> %data) {
 ; CHECK-LABEL: test_compress_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    retq
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x double> @llvm.x86.avx512.mask.compress.v8f64(<8 x double> %data, <8 x double> undef, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret <8 x double> %1
 }
 
 define <16 x float> @test_mask_compress_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) {
-; CHECK-LABEL: test_mask_compress_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcompressps %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_compress_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcompressps %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_compress_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcompressps %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = bitcast i16 %mask to <16 x i1>
   %2 = call <16 x float> @llvm.x86.avx512.mask.compress.v16f32(<16 x float> %data, <16 x float> %passthru, <16 x i1> %1)
   ret <16 x float> %2
 }
 
 define <16 x float> @test_maskz_compress_ps_512(<16 x float> %data, i16 %mask) {
-; CHECK-LABEL: test_maskz_compress_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcompressps %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_compress_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcompressps %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_compress_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcompressps %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = bitcast i16 %mask to <16 x i1>
   %2 = call <16 x float> @llvm.x86.avx512.mask.compress.v16f32(<16 x float> %data, <16 x float> zeroinitializer, <16 x i1> %1)
   ret <16 x float> %2
@@ -58,30 +87,45 @@ define <16 x float> @test_maskz_compress
 
 define <16 x float> @test_compress_ps_512(<16 x float> %data) {
 ; CHECK-LABEL: test_compress_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    retq
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.mask.compress.v16f32(<16 x float> %data, <16 x float> undef, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret <16 x float> %1
 }
 
 define <8 x i64> @test_mask_compress_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) {
-; CHECK-LABEL: test_mask_compress_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpcompressq %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_compress_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpcompressq %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_compress_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpcompressq %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = bitcast i8 %mask to <8 x i1>
   %2 = call <8 x i64> @llvm.x86.avx512.mask.compress.v8i64(<8 x i64> %data, <8 x i64> %passthru, <8 x i1> %1)
   ret <8 x i64> %2
 }
 
 define <8 x i64> @test_maskz_compress_q_512(<8 x i64> %data, i8 %mask) {
-; CHECK-LABEL: test_maskz_compress_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpcompressq %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_compress_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpcompressq %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_compress_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpcompressq %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = bitcast i8 %mask to <8 x i1>
   %2 = call <8 x i64> @llvm.x86.avx512.mask.compress.v8i64(<8 x i64> %data, <8 x i64> zeroinitializer, <8 x i1> %1)
   ret <8 x i64> %2
@@ -89,30 +133,43 @@ define <8 x i64> @test_maskz_compress_q_
 
 define <8 x i64> @test_compress_q_512(<8 x i64> %data) {
 ; CHECK-LABEL: test_compress_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    retq
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x i64> @llvm.x86.avx512.mask.compress.v8i64(<8 x i64> %data, <8 x i64> undef, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret <8 x i64> %1
 }
 
 define <16 x i32> @test_mask_compress_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) {
-; CHECK-LABEL: test_mask_compress_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpcompressd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_compress_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpcompressd %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_compress_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpcompressd %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = bitcast i16 %mask to <16 x i1>
   %2 = call <16 x i32> @llvm.x86.avx512.mask.compress.v16i32(<16 x i32> %data, <16 x i32> %passthru, <16 x i1> %1)
   ret <16 x i32> %2
 }
 
 define <16 x i32> @test_maskz_compress_d_512(<16 x i32> %data, i16 %mask) {
-; CHECK-LABEL: test_maskz_compress_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpcompressd %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_compress_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpcompressd %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_compress_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpcompressd %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = bitcast i16 %mask to <16 x i1>
   %2 = call <16 x i32> @llvm.x86.avx512.mask.compress.v16i32(<16 x i32> %data, <16 x i32> zeroinitializer, <16 x i1> %1)
   ret <16 x i32> %2
@@ -120,38 +177,53 @@ define <16 x i32> @test_maskz_compress_d
 
 define <16 x i32> @test_compress_d_512(<16 x i32> %data) {
 ; CHECK-LABEL: test_compress_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    retq
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x i32> @llvm.x86.avx512.mask.compress.v16i32(<16 x i32> %data, <16 x i32> undef, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret <16 x i32> %1
 }
 
 define <8 x double> @test_expand_pd_512(<8 x double> %data) {
 ; CHECK-LABEL: test_expand_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    retq
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x double> @llvm.x86.avx512.mask.expand.v8f64(<8 x double> %data, <8 x double> undef, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret <8 x double> %1
 }
 
 define <8 x double> @test_mask_expand_pd_512(<8 x double> %data, <8 x double> %passthru, i8 %mask) {
-; CHECK-LABEL: test_mask_expand_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vexpandpd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_expand_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vexpandpd %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_expand_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vexpandpd %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = bitcast i8 %mask to <8 x i1>
   %2 = call <8 x double> @llvm.x86.avx512.mask.expand.v8f64(<8 x double> %data, <8 x double> %passthru, <8 x i1> %1)
   ret <8 x double> %2
 }
 
 define <8 x double> @test_maskz_expand_pd_512(<8 x double> %data, i8 %mask) {
-; CHECK-LABEL: test_maskz_expand_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vexpandpd %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_expand_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vexpandpd %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_expand_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vexpandpd %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = bitcast i8 %mask to <8 x i1>
   %2 = call <8 x double> @llvm.x86.avx512.mask.expand.v8f64(<8 x double> %data, <8 x double> zeroinitializer, <8 x i1> %1)
   ret <8 x double> %2
@@ -159,30 +231,43 @@ define <8 x double> @test_maskz_expand_p
 
 define <16 x float> @test_expand_ps_512(<16 x float> %data) {
 ; CHECK-LABEL: test_expand_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    retq
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.mask.expand.v16f32(<16 x float> %data, <16 x float> undef, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mask_expand_ps_512(<16 x float> %data, <16 x float> %passthru, i16 %mask) {
-; CHECK-LABEL: test_mask_expand_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vexpandps %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_expand_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vexpandps %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_expand_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vexpandps %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = bitcast i16 %mask to <16 x i1>
   %2 = call <16 x float> @llvm.x86.avx512.mask.expand.v16f32(<16 x float> %data, <16 x float> %passthru, <16 x i1> %1)
   ret <16 x float> %2
 }
 
 define <16 x float> @test_maskz_expand_ps_512(<16 x float> %data, i16 %mask) {
-; CHECK-LABEL: test_maskz_expand_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vexpandps %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_expand_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vexpandps %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_expand_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vexpandps %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = bitcast i16 %mask to <16 x i1>
   %2 = call <16 x float> @llvm.x86.avx512.mask.expand.v16f32(<16 x float> %data, <16 x float> zeroinitializer, <16 x i1> %1)
   ret <16 x float> %2
@@ -190,30 +275,45 @@ define <16 x float> @test_maskz_expand_p
 
 define <8 x i64> @test_expand_q_512(<8 x i64> %data) {
 ; CHECK-LABEL: test_expand_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    retq
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x i64> @llvm.x86.avx512.mask.expand.v8i64(<8 x i64> %data, <8 x i64> undef, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret <8 x i64> %1
 }
 
 define <8 x i64> @test_mask_expand_q_512(<8 x i64> %data, <8 x i64> %passthru, i8 %mask) {
-; CHECK-LABEL: test_mask_expand_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpexpandq %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_expand_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpexpandq %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_expand_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpexpandq %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = bitcast i8 %mask to <8 x i1>
   %2 = call <8 x i64> @llvm.x86.avx512.mask.expand.v8i64(<8 x i64> %data, <8 x i64> %passthru, <8 x i1> %1)
   ret <8 x i64> %2
 }
 
 define <8 x i64> @test_maskz_expand_q_512(<8 x i64> %data, i8 %mask) {
-; CHECK-LABEL: test_maskz_expand_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpexpandq %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_expand_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpexpandq %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_expand_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpexpandq %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = bitcast i8 %mask to <8 x i1>
   %2 = call <8 x i64> @llvm.x86.avx512.mask.expand.v8i64(<8 x i64> %data, <8 x i64> zeroinitializer, <8 x i1> %1)
   ret <8 x i64> %2
@@ -221,30 +321,43 @@ define <8 x i64> @test_maskz_expand_q_51
 
 define <16 x i32> @test_expand_d_512(<16 x i32> %data) {
 ; CHECK-LABEL: test_expand_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    retq
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x i32> @llvm.x86.avx512.mask.expand.v16i32(<16 x i32> %data, <16 x i32> undef, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>)
   ret <16 x i32> %1
 }
 
 define <16 x i32> @test_mask_expand_d_512(<16 x i32> %data, <16 x i32> %passthru, i16 %mask) {
-; CHECK-LABEL: test_mask_expand_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpexpandd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_expand_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpexpandd %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_expand_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpexpandd %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = bitcast i16 %mask to <16 x i1>
   %2 = call <16 x i32> @llvm.x86.avx512.mask.expand.v16i32(<16 x i32> %data, <16 x i32> %passthru, <16 x i1> %1)
   ret <16 x i32> %2
 }
 
 define <16 x i32> @test_maskz_expand_d_512(<16 x i32> %data, i16 %mask) {
-; CHECK-LABEL: test_maskz_expand_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpexpandd %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_expand_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpexpandd %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_expand_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpexpandd %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = bitcast i16 %mask to <16 x i1>
   %2 = call <16 x i32> @llvm.x86.avx512.mask.expand.v16i32(<16 x i32> %data, <16 x i32> zeroinitializer, <16 x i1> %1)
   ret <16 x i32> %2
@@ -252,9 +365,9 @@ define <16 x i32> @test_maskz_expand_d_5
 
 define <16 x float> @test_rcp_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_rcp_ps_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrcp14ps %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.x86.avx512.rcp14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
   ret <16 x float> %res
 }
@@ -262,9 +375,9 @@ declare <16 x float> @llvm.x86.avx512.rc
 
 define <8 x double> @test_rcp_pd_512(<8 x double> %a0) {
 ; CHECK-LABEL: test_rcp_pd_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrcp14pd %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x double> @llvm.x86.avx512.rcp14.pd.512(<8 x double> %a0, <8 x double> zeroinitializer, i8 -1) ; <<8 x double>> [#uses=1]
   ret <8 x double> %res
 }
@@ -274,42 +387,66 @@ declare <2 x double> @llvm.x86.avx512.ma
 
 define <2 x double> @test_rndscale_sd(<2 x double> %a, <2 x double> %b) {
 ; CHECK-LABEL: test_rndscale_sd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vroundsd $11, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> undef, i8 -1, i32 11, i32 4)
   ret <2 x double>%res
 }
 
 define <2 x double> @test_rndscale_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) {
-; CHECK-LABEL: test_rndscale_sd_mask:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vrndscalesd $11, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovapd %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_rndscale_sd_mask:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vrndscalesd $11, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovapd %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_rndscale_sd_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vrndscalesd $11, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovapd %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 11, i32 4)
   ret <2 x double>%res
 }
 
 define <2 x double> @test_rndscale_sd_mask_load(<2 x double> %a, <2 x double>* %bptr, <2 x double> %c, i8 %mask) {
-; CHECK-LABEL: test_rndscale_sd_mask_load:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vrndscalesd $11, (%rdi), %xmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vmovapd %xmm1, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_rndscale_sd_mask_load:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vrndscalesd $11, (%rdi), %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovapd %xmm1, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_rndscale_sd_mask_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vrndscalesd $11, (%eax), %xmm0, %xmm1 {%k1}
+; X86-NEXT:    vmovapd %xmm1, %xmm0
+; X86-NEXT:    retl
   %b = load <2 x double>, <2 x double>* %bptr
   %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask, i32 11, i32 4)
   ret <2 x double>%res
 }
 
 define <2 x double> @test_rndscale_sd_maskz(<2 x double> %a, <2 x double> %b, i8 %mask) {
-; CHECK-LABEL: test_rndscale_sd_maskz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vrndscalesd $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_rndscale_sd_maskz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vrndscalesd $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_rndscale_sd_maskz:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vrndscalesd $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.rndscale.sd(<2 x double> %a, <2 x double> %b, <2 x double> zeroinitializer, i8 %mask, i32 11, i32 4)
   ret <2 x double>%res
 }
@@ -318,40 +455,61 @@ declare <4 x float> @llvm.x86.avx512.mas
 
 define <4 x float> @test_rndscale_ss(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: test_rndscale_ss:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vroundss $11, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 11, i32 4)
   ret <4 x float>%res
 }
 
 define <4 x float> @test_rndscale_ss_load(<4 x float> %a, <4 x float>* %bptr) {
-; CHECK-LABEL: test_rndscale_ss_load:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vroundss $11, (%rdi), %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_rndscale_ss_load:
+; X64:       # %bb.0:
+; X64-NEXT:    vroundss $11, (%rdi), %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_rndscale_ss_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vroundss $11, (%eax), %xmm0, %xmm0
+; X86-NEXT:    retl
   %b = load <4 x float>, <4 x float>* %bptr
   %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> undef, i8 -1, i32 11, i32 4)
   ret <4 x float>%res
 }
 
 define <4 x float> @test_rndscale_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
-; CHECK-LABEL: test_rndscale_ss_mask:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vrndscaless $11, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_rndscale_ss_mask:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vrndscaless $11, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovaps %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_rndscale_ss_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vrndscaless $11, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovaps %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask, i32 11, i32 4)
   ret <4 x float>%res
 }
 
 define <4 x float> @test_rndscale_ss_maskz(<4 x float> %a, <4 x float> %b, i8 %mask) {
-; CHECK-LABEL: test_rndscale_ss_maskz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vrndscaless $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_rndscale_ss_maskz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vrndscaless $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_rndscale_ss_maskz:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vrndscaless $11, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.rndscale.ss(<4 x float> %a, <4 x float> %b, <4 x float> zeroinitializer, i8 %mask, i32 11, i32 4)
   ret <4 x float>%res
 }
@@ -360,9 +518,9 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double> @test7(<8 x double> %a) {
 ; CHECK-LABEL: test7:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscalepd $11, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double> %a, i32 11, <8 x double> %a, i8 -1, i32 4)
   ret <8 x double>%res
 }
@@ -371,18 +529,18 @@ declare <16 x float> @llvm.x86.avx512.ma
 
 define <16 x float> @test8(<16 x float> %a) {
 ; CHECK-LABEL: test8:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrndscaleps $11, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float> %a, i32 11, <16 x float> %a, i16 -1, i32 4)
   ret <16 x float>%res
 }
 
 define <16 x float> @test_rsqrt_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_rsqrt_ps_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vrsqrt14ps %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.x86.avx512.rsqrt14.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1) ; <<16 x float>> [#uses=1]
   ret <16 x float> %res
 }
@@ -390,20 +548,28 @@ declare <16 x float> @llvm.x86.avx512.rs
 
 define <8 x double> @test_sqrt_pd_512(<8 x double> %a0) {
 ; CHECK-LABEL: test_sqrt_pd_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsqrtpd %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a0)
   ret <8 x double> %1
 }
 
 define <8 x double> @test_mask_sqrt_pd_512(<8 x double> %a0, <8 x double> %passthru, i8 %mask) {
-; CHECK-LABEL: test_mask_sqrt_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsqrtpd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovapd %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_sqrt_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsqrtpd %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovapd %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_sqrt_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vsqrtpd %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovapd %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a0)
   %2 = bitcast i8 %mask to <8 x i1>
   %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> %passthru
@@ -411,11 +577,18 @@ define <8 x double> @test_mask_sqrt_pd_5
 }
 
 define <8 x double> @test_maskz_sqrt_pd_512(<8 x double> %a0, i8 %mask) {
-; CHECK-LABEL: test_maskz_sqrt_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsqrtpd %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_sqrt_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsqrtpd %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_sqrt_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vsqrtpd %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a0)
   %2 = bitcast i8 %mask to <8 x i1>
   %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
@@ -425,20 +598,28 @@ declare <8 x double> @llvm.sqrt.v8f64(<8
 
 define <8 x double> @test_sqrt_round_pd_512(<8 x double> %a0) {
 ; CHECK-LABEL: test_sqrt_round_pd_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsqrtpd {rz-sae}, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x double> @llvm.x86.avx512.sqrt.pd.512(<8 x double> %a0, i32 11)
   ret <8 x double> %1
 }
 
 define <8 x double> @test_mask_sqrt_round_pd_512(<8 x double> %a0, <8 x double> %passthru, i8 %mask) {
-; CHECK-LABEL: test_mask_sqrt_round_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsqrtpd {rz-sae}, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovapd %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_sqrt_round_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsqrtpd {rz-sae}, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovapd %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_sqrt_round_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vsqrtpd {rz-sae}, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovapd %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.x86.avx512.sqrt.pd.512(<8 x double> %a0, i32 11)
   %2 = bitcast i8 %mask to <8 x i1>
   %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> %passthru
@@ -446,11 +627,18 @@ define <8 x double> @test_mask_sqrt_roun
 }
 
 define <8 x double> @test_maskz_sqrt_round_pd_512(<8 x double> %a0, i8 %mask) {
-; CHECK-LABEL: test_maskz_sqrt_round_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsqrtpd {rz-sae}, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_sqrt_round_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsqrtpd {rz-sae}, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_sqrt_round_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vsqrtpd {rz-sae}, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.x86.avx512.sqrt.pd.512(<8 x double> %a0, i32 11)
   %2 = bitcast i8 %mask to <8 x i1>
   %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
@@ -460,20 +648,27 @@ declare <8 x double> @llvm.x86.avx512.sq
 
 define <16 x float> @test_sqrt_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_sqrt_ps_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsqrtps %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a0)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mask_sqrt_ps_512(<16 x float> %a0, <16 x float> %passthru, i16 %mask) {
-; CHECK-LABEL: test_mask_sqrt_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsqrtps %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovaps %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_sqrt_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsqrtps %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovaps %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_sqrt_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vsqrtps %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovaps %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a0)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
@@ -481,11 +676,17 @@ define <16 x float> @test_mask_sqrt_ps_5
 }
 
 define <16 x float> @test_maskz_sqrt_ps_512(<16 x float> %a0, i16 %mask) {
-; CHECK-LABEL: test_maskz_sqrt_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsqrtps %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_sqrt_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsqrtps %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_sqrt_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vsqrtps %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a0)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -495,20 +696,27 @@ declare <16 x float> @llvm.sqrt.v16f32(<
 
 define <16 x float> @test_sqrt_round_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_sqrt_round_ps_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsqrtps {rz-sae}, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float> %a0, i32 11)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mask_sqrt_round_ps_512(<16 x float> %a0, <16 x float> %passthru, i16 %mask) {
-; CHECK-LABEL: test_mask_sqrt_round_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsqrtps {rz-sae}, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovaps %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_sqrt_round_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsqrtps {rz-sae}, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovaps %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_sqrt_round_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vsqrtps {rz-sae}, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovaps %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float> %a0, i32 11)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
@@ -516,11 +724,17 @@ define <16 x float> @test_mask_sqrt_roun
 }
 
 define <16 x float> @test_maskz_sqrt_round_ps_512(<16 x float> %a0, i16 %mask) {
-; CHECK-LABEL: test_maskz_sqrt_round_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsqrtps {rz-sae}, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_sqrt_round_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsqrtps {rz-sae}, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_sqrt_round_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vsqrtps {rz-sae}, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.sqrt.ps.512(<16 x float> %a0, i32 11)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -530,17 +744,17 @@ declare <16 x float> @llvm.x86.avx512.sq
 
 define <8 x double> @test_getexp_pd_512(<8 x double> %a0) {
 ; CHECK-LABEL: test_getexp_pd_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vgetexppd %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0,  <8 x double> zeroinitializer, i8 -1, i32 4)
   ret <8 x double> %res
 }
 define <8 x double> @test_getexp_round_pd_512(<8 x double> %a0) {
 ; CHECK-LABEL: test_getexp_round_pd_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vgetexppd {sae}, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x double> @llvm.x86.avx512.mask.getexp.pd.512(<8 x double> %a0,  <8 x double> zeroinitializer, i8 -1, i32 8)
   ret <8 x double> %res
 }
@@ -548,18 +762,18 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <16 x float> @test_getexp_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_getexp_ps_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vgetexpps %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
   ret <16 x float> %res
 }
 
 define <16 x float> @test_getexp_round_ps_512(<16 x float> %a0) {
 ; CHECK-LABEL: test_getexp_round_ps_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vgetexpps {sae}, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.x86.avx512.mask.getexp.ps.512(<16 x float> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
   ret <16 x float> %res
 }
@@ -568,18 +782,32 @@ declare <16 x float> @llvm.x86.avx512.ma
 declare <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
 
 define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_sqrt_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %xmm2, %xmm3
-; CHECK-NEXT:    vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT:    vsqrtss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddps %xmm2, %xmm3, %xmm2
-; CHECK-NEXT:    vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
-; CHECK-NEXT:    vsqrtss {rz-sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_sqrt_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %xmm2, %xmm3
+; X64-NEXT:    vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
+; X64-NEXT:    vsqrtss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddps %xmm2, %xmm3, %xmm2
+; X64-NEXT:    vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; X64-NEXT:    vsqrtss {rz-sae}, %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_sqrt_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovaps %xmm2, %xmm3
+; X86-NEXT:    vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
+; X86-NEXT:    vsqrtss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddps %xmm2, %xmm3, %xmm2
+; X86-NEXT:    vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; X86-NEXT:    vsqrtss {rz-sae}, %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm2, %xmm0
+; X86-NEXT:    retl
   %res0 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
   %res1 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 1)
   %res2 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 2)
@@ -594,18 +822,32 @@ define <4 x float> @test_sqrt_ss(<4 x fl
 declare <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
 
 define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_sqrt_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %xmm2, %xmm3
-; CHECK-NEXT:    vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT:    vsqrtsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddpd %xmm2, %xmm3, %xmm2
-; CHECK-NEXT:    vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
-; CHECK-NEXT:    vsqrtsd {rz-sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    vaddpd %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_sqrt_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %xmm2, %xmm3
+; X64-NEXT:    vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
+; X64-NEXT:    vsqrtsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddpd %xmm2, %xmm3, %xmm2
+; X64-NEXT:    vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; X64-NEXT:    vsqrtsd {rz-sae}, %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X64-NEXT:    vaddpd %xmm0, %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_sqrt_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm2, %xmm3
+; X86-NEXT:    vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
+; X86-NEXT:    vsqrtsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddpd %xmm2, %xmm3, %xmm2
+; X86-NEXT:    vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; X86-NEXT:    vsqrtsd {rz-sae}, %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm2, %xmm0
+; X86-NEXT:    retl
   %res0 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
   %res1 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 1)
   %res2 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 2)
@@ -617,47 +859,13 @@ define <2 x double> @test_sqrt_sd(<2 x d
   ret <2 x double> %res
 }
 
-define i64 @test_x86_sse2_cvtsd2si64(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_sse2_cvtsd2si64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsd2si %xmm0, %rax
-; CHECK-NEXT:    retq
-  %res = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %a0) ; <i64> [#uses=1]
-  ret i64 %res
-}
-declare i64 @llvm.x86.sse2.cvtsd2si64(<2 x double>) nounwind readnone
-
-define <2 x double> @test_x86_sse2_cvtsi642sd(<2 x double> %a0, i64 %a1) {
-; CHECK-LABEL: test_x86_sse2_cvtsi642sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double> %a0, i64 %a1) ; <<2 x double>> [#uses=1]
-  ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.sse2.cvtsi642sd(<2 x double>, i64) nounwind readnone
-
-define i64 @test_x86_avx512_cvttsd2si64(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_avx512_cvttsd2si64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvttsd2si %xmm0, %rcx
-; CHECK-NEXT:    vcvttsd2si {sae}, %xmm0, %rax
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    retq
-  %res0 = call i64 @llvm.x86.avx512.cvttsd2si64(<2 x double> %a0, i32 4) ;
-  %res1 = call i64 @llvm.x86.avx512.cvttsd2si64(<2 x double> %a0, i32 8) ;
-  %res2 = add i64 %res0, %res1
-  ret i64 %res2
-}
-declare i64 @llvm.x86.avx512.cvttsd2si64(<2 x double>, i32) nounwind readnone
-
 define i32 @test_x86_avx512_cvttsd2usi(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttsd2usi:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttsd2usi %xmm0, %ecx
 ; CHECK-NEXT:    vcvttsd2usi {sae}, %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call i32 @llvm.x86.avx512.cvttsd2usi(<2 x double> %a0, i32 4) ;
   %res1 = call i32 @llvm.x86.avx512.cvttsd2usi(<2 x double> %a0, i32 8) ;
   %res2 = add i32 %res0, %res1
@@ -667,11 +875,11 @@ declare i32 @llvm.x86.avx512.cvttsd2usi(
 
 define i32 @test_x86_avx512_cvttsd2si(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttsd2si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttsd2si %xmm0, %ecx
 ; CHECK-NEXT:    vcvttsd2si {sae}, %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call i32 @llvm.x86.avx512.cvttsd2si(<2 x double> %a0, i32 4) ;
   %res1 = call i32 @llvm.x86.avx512.cvttsd2si(<2 x double> %a0, i32 8) ;
   %res2 = add i32 %res0, %res1
@@ -679,51 +887,13 @@ define i32 @test_x86_avx512_cvttsd2si(<2
 }
 declare i32 @llvm.x86.avx512.cvttsd2si(<2 x double>, i32) nounwind readnone
 
-
-
-define i64 @test_x86_avx512_cvttsd2usi64(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_avx512_cvttsd2usi64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvttsd2usi %xmm0, %rcx
-; CHECK-NEXT:    vcvttsd2usi {sae}, %xmm0, %rax
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    retq
-  %res0 = call i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double> %a0, i32 4) ;
-  %res1 = call i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double> %a0, i32 8) ;
-  %res2 = add i64 %res0, %res1
-  ret i64 %res2
-}
-declare i64 @llvm.x86.avx512.cvttsd2usi64(<2 x double>, i32) nounwind readnone
-
-define i64 @test_x86_sse_cvtss2si64(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_sse_cvtss2si64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtss2si %xmm0, %rax
-; CHECK-NEXT:    retq
-  %res = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %a0) ; <i64> [#uses=1]
-  ret i64 %res
-}
-declare i64 @llvm.x86.sse.cvtss2si64(<4 x float>) nounwind readnone
-
-
-define <4 x float> @test_x86_sse_cvtsi642ss(<4 x float> %a0, i64 %a1) {
-; CHECK-LABEL: test_x86_sse_cvtsi642ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float> %a0, i64 %a1) ; <<4 x float>> [#uses=1]
-  ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.sse.cvtsi642ss(<4 x float>, i64) nounwind readnone
-
-
 define i32 @test_x86_avx512_cvttss2si(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttss2si:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttss2si {sae}, %xmm0, %ecx
 ; CHECK-NEXT:    vcvttss2si %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call i32 @llvm.x86.avx512.cvttss2si(<4 x float> %a0, i32 8) ;
   %res1 = call i32 @llvm.x86.avx512.cvttss2si(<4 x float> %a0, i32 4) ;
   %res2 = add i32 %res0, %res1
@@ -732,36 +902,28 @@ define i32 @test_x86_avx512_cvttss2si(<4
 declare i32 @llvm.x86.avx512.cvttss2si(<4 x float>, i32) nounwind readnone
 
 define i32 @test_x86_avx512_cvttss2si_load(<4 x float>* %a0) {
-; CHECK-LABEL: test_x86_avx512_cvttss2si_load:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvttss2si (%rdi), %eax
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_cvttss2si_load:
+; X64:       # %bb.0:
+; X64-NEXT:    vcvttss2si (%rdi), %eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_cvttss2si_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vcvttss2si (%eax), %eax
+; X86-NEXT:    retl
   %a1 = load <4 x float>, <4 x float>* %a0
   %res = call i32 @llvm.x86.avx512.cvttss2si(<4 x float> %a1, i32 4) ;
   ret i32 %res
 }
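
The load test above shows why some functions split into X64/X86 blocks while others keep a common CHECK block: under the 32-bit calling convention the pointer argument lives on the stack, so the X86 code must first load it into a register ({{[0-9]+}}(%esp) is a FileCheck regex over the stack offset) before the conversion can fold the memory operand. A minimal sketch of the same two-step lowering, on a hypothetical function that is not part of this patch:

define i32 @load_arg(i32* %p) {
; On x86-64 the pointer arrives in %rdi and the load folds directly:
; X64:       movl (%rdi), %eax
; On 32-bit x86 the pointer is read off the stack first:
; X86:       movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:  movl (%eax), %eax
  %v = load i32, i32* %p
  ret i32 %v
}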
 
-define i64 @test_x86_avx512_cvttss2si64(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_avx512_cvttss2si64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvttss2si %xmm0, %rcx
-; CHECK-NEXT:    vcvttss2si {sae}, %xmm0, %rax
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    retq
-  %res0 = call i64 @llvm.x86.avx512.cvttss2si64(<4 x float> %a0, i32 4) ;
-  %res1 = call i64 @llvm.x86.avx512.cvttss2si64(<4 x float> %a0, i32 8) ;
-  %res2 = add i64 %res0, %res1
-  ret i64 %res2
-}
-declare i64 @llvm.x86.avx512.cvttss2si64(<4 x float>, i32) nounwind readnone
-
 define i32 @test_x86_avx512_cvttss2usi(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvttss2usi:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvttss2usi {sae}, %xmm0, %ecx
 ; CHECK-NEXT:    vcvttss2usi %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res0 = call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> %a0, i32 8) ;
   %res1 = call i32 @llvm.x86.avx512.cvttss2usi(<4 x float> %a0, i32 4) ;
   %res2 = add i32 %res0, %res1
@@ -769,105 +931,15 @@ define i32 @test_x86_avx512_cvttss2usi(<
 }
 declare i32 @llvm.x86.avx512.cvttss2usi(<4 x float>, i32) nounwind readnone
 
-define i64 @test_x86_avx512_cvttss2usi64(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_avx512_cvttss2usi64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvttss2usi %xmm0, %rcx
-; CHECK-NEXT:    vcvttss2usi {sae}, %xmm0, %rax
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    retq
-  %res0 = call i64 @llvm.x86.avx512.cvttss2usi64(<4 x float> %a0, i32 4) ;
-  %res1 = call i64 @llvm.x86.avx512.cvttss2usi64(<4 x float> %a0, i32 8) ;
-  %res2 = add i64 %res0, %res1
-  ret i64 %res2
-}
-declare i64 @llvm.x86.avx512.cvttss2usi64(<4 x float>, i32) nounwind readnone
-
-define i64 @test_x86_avx512_cvtsd2usi64(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_avx512_cvtsd2usi64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsd2usi %xmm0, %rax
-; CHECK-NEXT:    vcvtsd2usi {rz-sae}, %xmm0, %rcx
-; CHECK-NEXT:    addq %rax, %rcx
-; CHECK-NEXT:    vcvtsd2usi {rd-sae}, %xmm0, %rax
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    retq
-
-  %res = call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %a0, i32 4)
-  %res1 = call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %a0, i32 3)
-  %res2 = call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %a0, i32 1)
-  %res3 = add i64 %res, %res1
-  %res4 = add i64 %res3, %res2
-  ret i64 %res4
-}
-declare i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double>, i32) nounwind readnone
-
-define i64 @test_x86_avx512_cvtsd2si64(<2 x double> %a0) {
-; CHECK-LABEL: test_x86_avx512_cvtsd2si64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsd2si %xmm0, %rax
-; CHECK-NEXT:    vcvtsd2si {rz-sae}, %xmm0, %rcx
-; CHECK-NEXT:    addq %rax, %rcx
-; CHECK-NEXT:    vcvtsd2si {rd-sae}, %xmm0, %rax
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    retq
-
-  %res = call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %a0, i32 4)
-  %res1 = call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %a0, i32 3)
-  %res2 = call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %a0, i32 1)
-  %res3 = add i64 %res, %res1
-  %res4 = add i64 %res3, %res2
-  ret i64 %res4
-}
-declare i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double>, i32) nounwind readnone
-
-define i64 @test_x86_avx512_cvtss2usi64(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_avx512_cvtss2usi64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtss2usi %xmm0, %rax
-; CHECK-NEXT:    vcvtss2usi {rz-sae}, %xmm0, %rcx
-; CHECK-NEXT:    addq %rax, %rcx
-; CHECK-NEXT:    vcvtss2usi {rd-sae}, %xmm0, %rax
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    retq
-
-  %res = call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %a0, i32 4)
-  %res1 = call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %a0, i32 3)
-  %res2 = call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %a0, i32 1)
-  %res3 = add i64 %res, %res1
-  %res4 = add i64 %res3, %res2
-  ret i64 %res4
-}
-declare i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float>, i32) nounwind readnone
-
-define i64 @test_x86_avx512_cvtss2si64(<4 x float> %a0) {
-; CHECK-LABEL: test_x86_avx512_cvtss2si64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtss2si %xmm0, %rax
-; CHECK-NEXT:    vcvtss2si {rz-sae}, %xmm0, %rcx
-; CHECK-NEXT:    addq %rax, %rcx
-; CHECK-NEXT:    vcvtss2si {rd-sae}, %xmm0, %rax
-; CHECK-NEXT:    addq %rcx, %rax
-; CHECK-NEXT:    retq
-
-  %res = call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %a0, i32 4)
-  %res1 = call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %a0, i32 3)
-  %res2 = call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %a0, i32 1)
-  %res3 = add i64 %res, %res1
-  %res4 = add i64 %res3, %res2
-  ret i64 %res4
-}
-declare i64 @llvm.x86.avx512.vcvtss2si64(<4 x float>, i32) nounwind readnone
-
 define i32 @test_x86_avx512_cvtsd2usi32(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtsd2usi32:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtsd2usi %xmm0, %eax
 ; CHECK-NEXT:    vcvtsd2usi {rz-sae}, %xmm0, %ecx
 ; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    vcvtsd2usi {rd-sae}, %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
 
   %res = call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> %a0, i32 4)
   %res1 = call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> %a0, i32 3)
@@ -880,13 +952,13 @@ declare i32 @llvm.x86.avx512.vcvtsd2usi3
 
 define i32 @test_x86_avx512_cvtsd2si32(<2 x double> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtsd2si32:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtsd2si %xmm0, %eax
 ; CHECK-NEXT:    vcvtsd2si {rz-sae}, %xmm0, %ecx
 ; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    vcvtsd2si {rd-sae}, %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
 
   %res = call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> %a0, i32 4)
   %res1 = call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> %a0, i32 3)
@@ -899,13 +971,13 @@ declare i32 @llvm.x86.avx512.vcvtsd2si32
 
 define i32 @test_x86_avx512_cvtss2usi32(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtss2usi32:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtss2usi %xmm0, %eax
 ; CHECK-NEXT:    vcvtss2usi {rz-sae}, %xmm0, %ecx
 ; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    vcvtss2usi {rd-sae}, %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
 
   %res = call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> %a0, i32 4)
   %res1 = call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> %a0, i32 3)
@@ -918,13 +990,13 @@ declare i32 @llvm.x86.avx512.vcvtss2usi3
 
 define i32 @test_x86_avx512_cvtss2si32(<4 x float> %a0) {
 ; CHECK-LABEL: test_x86_avx512_cvtss2si32:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtss2si %xmm0, %eax
 ; CHECK-NEXT:    vcvtss2si {rz-sae}, %xmm0, %ecx
 ; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    vcvtss2si {rd-sae}, %xmm0, %eax
 ; CHECK-NEXT:    addl %ecx, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
 
   %res = call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %a0, i32 4)
   %res1 = call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %a0, i32 3)
@@ -937,49 +1009,68 @@ declare i32 @llvm.x86.avx512.vcvtss2si32
 
 define <16 x float> @test_x86_vcvtph2ps_512(<16 x i16> %a0) {
 ; CHECK-LABEL: test_x86_vcvtph2ps_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtph2ps %ymm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 4)
   ret <16 x float> %res
 }
 
 define <16 x float> @test_x86_vcvtph2ps_512_sae(<16 x i16> %a0) {
 ; CHECK-LABEL: test_x86_vcvtph2ps_512_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcvtph2ps {sae}, %ymm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 -1, i32 8)
   ret <16 x float> %res
 }
 
 define <16 x float> @test_x86_vcvtph2ps_512_rrk(<16 x i16> %a0,<16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_x86_vcvtph2ps_512_rrk:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtph2ps %ymm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovaps %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_vcvtph2ps_512_rrk:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtph2ps %ymm0, %zmm1 {%k1}
+; X64-NEXT:    vmovaps %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_vcvtph2ps_512_rrk:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvtph2ps %ymm0, %zmm1 {%k1}
+; X86-NEXT:    vmovaps %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> %a1, i16 %mask, i32 4)
   ret <16 x float> %res
 }
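
The i16 mask operands follow the same convention split: on x86-64 the mask arrives in %edi and needs a register-to-mask kmovw, while on 32-bit it sits in a stack slot and kmovw's memory form loads it into %k1 directly, so the X86 block ends up no longer than the X64 one. The recurring pattern, extracted from the checks above:

; X64:  kmovw %edi, %k1
; X86:  kmovw {{[0-9]+}}(%esp), %k1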
 
 define <16 x float> @test_x86_vcvtph2ps_512_sae_rrkz(<16 x i16> %a0, i16 %mask) {
-; CHECK-LABEL: test_x86_vcvtph2ps_512_sae_rrkz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtph2ps {sae}, %ymm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_vcvtph2ps_512_sae_rrkz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtph2ps {sae}, %ymm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_vcvtph2ps_512_sae_rrkz:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvtph2ps {sae}, %ymm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 %mask, i32 8)
   ret <16 x float> %res
 }
 
 define <16 x float> @test_x86_vcvtph2ps_512_rrkz(<16 x i16> %a0, i16 %mask) {
-; CHECK-LABEL: test_x86_vcvtph2ps_512_rrkz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtph2ps %ymm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_vcvtph2ps_512_rrkz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtph2ps %ymm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_vcvtph2ps_512_rrkz:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvtph2ps %ymm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16> %a0, <16 x float> zeroinitializer, i16 %mask, i32 4)
   ret <16 x float> %res
 }
@@ -987,15 +1078,26 @@ define <16 x float> @test_x86_vcvtph2ps_
 declare <16 x float> @llvm.x86.avx512.mask.vcvtph2ps.512(<16 x i16>, <16 x float>, i16, i32) nounwind readonly
 
 define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0, <16 x i16> %src, i16 %mask, <16 x i16> * %dst) {
-; CHECK-LABEL: test_x86_vcvtps2ph_256:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
-; CHECK-NEXT:    vcvtps2ph $2, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpaddw %ymm1, %ymm2, %ymm1
-; CHECK-NEXT:    vcvtps2ph $2, %zmm0, (%rsi)
-; CHECK-NEXT:    vmovdqa %ymm1, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_vcvtps2ph_256:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
+; X64-NEXT:    vcvtps2ph $2, %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vpaddw %ymm1, %ymm2, %ymm1
+; X64-NEXT:    vcvtps2ph $2, %zmm0, (%rsi)
+; X64-NEXT:    vmovdqa %ymm1, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_vcvtps2ph_256:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
+; X86-NEXT:    vcvtps2ph $2, %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vpaddw %ymm1, %ymm2, %ymm1
+; X86-NEXT:    vcvtps2ph $2, %zmm0, (%eax)
+; X86-NEXT:    vmovdqa %ymm1, %ymm0
+; X86-NEXT:    retl
   %res1 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 -1)
   %res2 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 %mask)
   %res3 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> %src, i16 %mask)
@@ -1008,12 +1110,12 @@ declare <16 x i16> @llvm.x86.avx512.mask
 
 define i16 @test_cmpps(<16 x float> %a, <16 x float> %b) {
 ; CHECK-LABEL: test_cmpps:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpleps {sae}, %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    ## kill: def $ax killed $ax killed $eax
+; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
 ; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i1> @llvm.x86.avx512.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i32 8)
   %1 = bitcast <16 x i1> %res to i16
   ret i16 %1
@@ -1022,12 +1124,12 @@ declare <16 x i1> @llvm.x86.avx512.cmp.p
 
 define i8 @test_cmppd(<8 x double> %a, <8 x double> %b) {
 ; CHECK-LABEL: test_cmppd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpneqpd %zmm1, %zmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    ## kill: def $al killed $al killed $eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i32 4)
   %1 = bitcast <8 x i1> %res to i8
   ret i8 %1
@@ -1039,9 +1141,9 @@ declare <8 x i1> @llvm.x86.avx512.cmp.pd
  ; fp min/max
 define <8 x double> @test_vmaxpd(<8 x double> %a0, <8 x double> %a1) {
 ; CHECK-LABEL: test_vmaxpd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxpd %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x double> @llvm.x86.avx512.max.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
   ret <8 x double> %1
 }
@@ -1049,20 +1151,28 @@ declare <8 x double> @llvm.x86.avx512.ma
 
 define <8 x double> @test_vminpd(<8 x double> %a0, <8 x double> %a1) {
 ; CHECK-LABEL: test_vminpd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vminpd %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double> %a0, <8 x double> %a1, i32 4)
   ret <8 x double> %1
 }
 declare <8 x double> @llvm.x86.avx512.min.pd.512(<8 x double>, <8 x double>, i32)
 
 define void @test_mask_store_ss(i8* %ptr, <4 x float> %data, i8 %mask) {
-; CHECK-LABEL: test_mask_store_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmovss %xmm0, (%rdi) {%k1}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_store_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmovss %xmm0, (%rdi) {%k1}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_store_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    kmovw %ecx, %k1
+; X86-NEXT:    vmovss %xmm0, (%eax) {%k1}
+; X86-NEXT:    retl
   %1 = and i8 %mask, 1
   %2 = bitcast i8* %ptr to <4 x float>*
   %3 = bitcast i8 %1 to <8 x i1>
@@ -1079,83 +1189,89 @@ declare <8 x double> @llvm.x86.avx512.mu
 
 define <16 x float> @test_vsubps_rn(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vsubps_rn:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsubps {rn-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_vsubps_rd(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vsubps_rd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsubps {rd-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_vsubps_ru(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vsubps_ru:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsubps {ru-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_vsubps_rz(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vsubps_rz:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsubps {rz-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_vmulps_rn(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vmulps_rn:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_vmulps_rd(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vmulps_rd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_vmulps_ru(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vmulps_ru:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_vmulps_rz(<16 x float> %a0, <16 x float> %a1) {
 ; CHECK-LABEL: test_vmulps_rz:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   ret <16 x float> %1
 }
 
 ;; mask float
 define <16 x float> @test_vmulps_mask_rn(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_vmulps_mask_rn:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulps_mask_rn:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulps_mask_rn:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1163,11 +1279,17 @@ define <16 x float> @test_vmulps_mask_rn
 }
 
 define <16 x float> @test_vmulps_mask_rd(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_vmulps_mask_rd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulps_mask_rd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulps_mask_rd:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1175,11 +1297,17 @@ define <16 x float> @test_vmulps_mask_rd
 }
 
 define <16 x float> @test_vmulps_mask_ru(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_vmulps_mask_ru:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulps_mask_ru:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulps_mask_ru:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1187,11 +1315,17 @@ define <16 x float> @test_vmulps_mask_ru
 }
 
 define <16 x float> @test_vmulps_mask_rz(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_vmulps_mask_rz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulps_mask_rz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulps_mask_rz:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1200,12 +1334,19 @@ define <16 x float> @test_vmulps_mask_rz
 
 ;; With Passthru value
 define <16 x float> @test_vmulps_mask_passthru_rn(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
-; CHECK-LABEL: test_vmulps_mask_passthru_rn:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulps_mask_passthru_rn:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulps_mask_passthru_rn:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmulps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
@@ -1213,12 +1354,19 @@ define <16 x float> @test_vmulps_mask_pa
 }
 
 define <16 x float> @test_vmulps_mask_passthru_rd(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
-; CHECK-LABEL: test_vmulps_mask_passthru_rd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulps_mask_passthru_rd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulps_mask_passthru_rd:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmulps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
@@ -1226,12 +1374,19 @@ define <16 x float> @test_vmulps_mask_pa
 }
 
 define <16 x float> @test_vmulps_mask_passthru_ru(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
-; CHECK-LABEL: test_vmulps_mask_passthru_ru:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulps_mask_passthru_ru:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulps_mask_passthru_ru:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmulps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
@@ -1239,12 +1394,19 @@ define <16 x float> @test_vmulps_mask_pa
 }
 
 define <16 x float> @test_vmulps_mask_passthru_rz(<16 x float> %a0, <16 x float> %a1, <16 x float> %passthru, i16 %mask) {
-; CHECK-LABEL: test_vmulps_mask_passthru_rz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulps_mask_passthru_rz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulps_mask_passthru_rz:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmulps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.mul.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %passthru
@@ -1253,11 +1415,18 @@ define <16 x float> @test_vmulps_mask_pa
 
 ;; mask double
 define <8 x double> @test_vmulpd_mask_rn(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
-; CHECK-LABEL: test_vmulpd_mask_rn:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulpd {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulpd_mask_rn:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulpd {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulpd_mask_rn:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmulpd {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double> %a0, <8 x double> %a1, i32 0)
   %2 = bitcast i8 %mask to <8 x i1>
   %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
@@ -1265,11 +1434,18 @@ define <8 x double> @test_vmulpd_mask_rn
 }
 
 define <8 x double> @test_vmulpd_mask_rd(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
-; CHECK-LABEL: test_vmulpd_mask_rd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulpd {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulpd_mask_rd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulpd {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulpd_mask_rd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmulpd {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double> %a0, <8 x double> %a1, i32 1)
   %2 = bitcast i8 %mask to <8 x i1>
   %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
@@ -1277,11 +1453,18 @@ define <8 x double> @test_vmulpd_mask_rd
 }
 
 define <8 x double> @test_vmulpd_mask_ru(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
-; CHECK-LABEL: test_vmulpd_mask_ru:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulpd {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulpd_mask_ru:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulpd {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulpd_mask_ru:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmulpd {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double> %a0, <8 x double> %a1, i32 2)
   %2 = bitcast i8 %mask to <8 x i1>
   %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
@@ -1289,11 +1472,18 @@ define <8 x double> @test_vmulpd_mask_ru
 }
 
 define <8 x double> @test_vmulpd_mask_rz(<8 x double> %a0, <8 x double> %a1, i8 %mask) {
-; CHECK-LABEL: test_vmulpd_mask_rz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmulpd {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_vmulpd_mask_rz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmulpd {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_vmulpd_mask_rz:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmulpd {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.x86.avx512.mul.pd.512(<8 x double> %a0, <8 x double> %a1, i32 3)
   %2 = bitcast i8 %mask to <8 x i1>
   %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> zeroinitializer
@@ -1301,11 +1491,17 @@ define <8 x double> @test_vmulpd_mask_rz
 }
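
The i8 masks on these vmulpd tests cannot use that shortcut. With only +avx512f there is no byte-sized mask move (kmovb requires AVX512DQ), so the 32-bit code zero-extends the mask byte into a GPR first and then uses the word-sized kmovw:

; X86:  movzbl {{[0-9]+}}(%esp), %eax
; X86:  kmovw %eax, %k1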
 
 define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_add_round_ps_rn_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_add_round_ps_rn_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_add_round_ps_rn_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1313,11 +1509,17 @@ define <16 x float> @test_mm512_maskz_ad
 }
 
 define <16 x float> @test_mm512_maskz_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_add_round_ps_rd_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_add_round_ps_rd_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_add_round_ps_rd_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1325,11 +1527,17 @@ define <16 x float> @test_mm512_maskz_ad
 }
 
 define <16 x float> @test_mm512_maskz_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_add_round_ps_ru_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_add_round_ps_ru_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_add_round_ps_ru_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1337,11 +1545,17 @@ define <16 x float> @test_mm512_maskz_ad
 }
 
 define <16 x float> @test_mm512_maskz_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_add_round_ps_rz_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_add_round_ps_rz_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_add_round_ps_rz_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1349,11 +1563,17 @@ define <16 x float> @test_mm512_maskz_ad
 }
 
 define <16 x float> @test_mm512_maskz_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_add_round_ps_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_add_round_ps_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_add_round_ps_current:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1361,12 +1581,19 @@ define <16 x float> @test_mm512_maskz_ad
 }
 
 define <16 x float> @test_mm512_mask_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_add_round_ps_rn_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_add_round_ps_rn_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_add_round_ps_rn_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1374,12 +1601,19 @@ define <16 x float> @test_mm512_mask_add
 }
 
 define <16 x float> @test_mm512_mask_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_add_round_ps_rd_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_add_round_ps_rd_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_add_round_ps_rd_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1387,12 +1621,19 @@ define <16 x float> @test_mm512_mask_add
 }
 
 define <16 x float> @test_mm512_mask_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_add_round_ps_ru_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_add_round_ps_ru_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_add_round_ps_ru_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1400,12 +1641,19 @@ define <16 x float> @test_mm512_mask_add
 }
 
 define <16 x float> @test_mm512_mask_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_add_round_ps_rz_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_add_round_ps_rz_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_add_round_ps_rz_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1413,12 +1661,19 @@ define <16 x float> @test_mm512_mask_add
 }
 
 define <16 x float> @test_mm512_mask_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_add_round_ps_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_add_round_ps_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddps %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_add_round_ps_current:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vaddps %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1427,57 +1682,64 @@ define <16 x float> @test_mm512_mask_add
 
 define <16 x float> @test_mm512_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_rn_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddps {rn-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_add_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_rd_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddps {rd-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_add_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_ru_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddps {ru-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_add_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_rz_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddps {rz-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_add_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_add_round_ps_current:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   ret <16 x float> %1
 }
 declare <16 x float> @llvm.x86.avx512.add.ps.512(<16 x float>, <16 x float>, i32)
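
For reference while reading these hunks, the trailing i32 on the rounding intrinsics selects the embedded rounding mode, and the mapping is visible directly in the mnemonics the checks expect:

;   i32 0  ->  {rn-sae}    round to nearest even
;   i32 1  ->  {rd-sae}    round down (toward -inf)
;   i32 2  ->  {ru-sae}    round up (toward +inf)
;   i32 3  ->  {rz-sae}    round toward zero
;   i32 4  ->  (no suffix) current MXCSR rounding
;   i32 8  ->  {sae}       exceptions suppressed, no embedded rounding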
 
 define <16 x float> @test_mm512_mask_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_sub_round_ps_rn_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsubps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_sub_round_ps_rn_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsubps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_sub_round_ps_rn_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vsubps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1485,12 +1747,19 @@ define <16 x float> @test_mm512_mask_sub
 }
 
 define <16 x float> @test_mm512_mask_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_sub_round_ps_rd_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsubps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_sub_round_ps_rd_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsubps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_sub_round_ps_rd_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vsubps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1498,12 +1767,19 @@ define <16 x float> @test_mm512_mask_sub
 }
 
 define <16 x float> @test_mm512_mask_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_sub_round_ps_ru_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsubps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_sub_round_ps_ru_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsubps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_sub_round_ps_ru_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vsubps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1511,12 +1787,19 @@ define <16 x float> @test_mm512_mask_sub
 }
 
 define <16 x float> @test_mm512_mask_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_sub_round_ps_rz_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsubps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_sub_round_ps_rz_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsubps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_sub_round_ps_rz_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vsubps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1524,12 +1807,19 @@ define <16 x float> @test_mm512_mask_sub
 }
 
 define <16 x float> @test_mm512_mask_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_sub_round_ps_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vsubps %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_sub_round_ps_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vsubps %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_sub_round_ps_current:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vsubps %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1538,55 +1828,61 @@ define <16 x float> @test_mm512_mask_sub
 
 define <16 x float> @test_mm512_sub_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_rn_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsubps {rn-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_sub_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_rd_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsubps {rd-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_sub_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_ru_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsubps {ru-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_sub_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_rz_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsubps {rz-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_sub_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_sub_round_ps_current:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsubps %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.sub.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_maskz_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_div_round_ps_rn_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_div_round_ps_rn_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_div_round_ps_rn_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1594,11 +1890,17 @@ define <16 x float> @test_mm512_maskz_di
 }
 
 define <16 x float> @test_mm512_maskz_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_div_round_ps_rd_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_div_round_ps_rd_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_div_round_ps_rd_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1606,11 +1908,17 @@ define <16 x float> @test_mm512_maskz_di
 }
 
 define <16 x float> @test_mm512_maskz_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_div_round_ps_ru_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_div_round_ps_ru_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_div_round_ps_ru_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1618,11 +1926,17 @@ define <16 x float> @test_mm512_maskz_di
 }
 
 define <16 x float> @test_mm512_maskz_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_div_round_ps_rz_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_div_round_ps_rz_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_div_round_ps_rz_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1630,11 +1944,17 @@ define <16 x float> @test_mm512_maskz_di
 }
 
 define <16 x float> @test_mm512_maskz_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_div_round_ps_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_div_round_ps_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_div_round_ps_current:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1642,12 +1962,19 @@ define <16 x float> @test_mm512_maskz_di
 }
 
 define <16 x float> @test_mm512_mask_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_div_round_ps_rn_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_div_round_ps_rn_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_div_round_ps_rn_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1655,12 +1982,19 @@ define <16 x float> @test_mm512_mask_div
 }
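
; Note on the two masking forms exercised here: the maskz_* tests above use
; zeroing masking, so the result is written directly into %zmm0 with
; {%k1} {z}, while the mask_* tests merge into the passthrough value held in
; %zmm2 under {%k1} and then copy it back to %zmm0 with vmovaps.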
 
 define <16 x float> @test_mm512_mask_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_div_round_ps_rd_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_div_round_ps_rd_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_div_round_ps_rd_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1668,12 +2002,19 @@ define <16 x float> @test_mm512_mask_div
 }
 
 define <16 x float> @test_mm512_mask_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_div_round_ps_ru_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_div_round_ps_ru_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_div_round_ps_ru_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1681,12 +2022,19 @@ define <16 x float> @test_mm512_mask_div
 }
 
 define <16 x float> @test_mm512_mask_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_div_round_ps_rz_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_div_round_ps_rz_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_div_round_ps_rz_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1694,12 +2042,19 @@ define <16 x float> @test_mm512_mask_div
 }
 
 define <16 x float> @test_mm512_mask_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_div_round_ps_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vdivps %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_div_round_ps_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vdivps %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_div_round_ps_current:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vdivps %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1708,56 +2063,62 @@ define <16 x float> @test_mm512_mask_div
 
 define <16 x float> @test_mm512_div_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_rn_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vdivps {rn-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 0)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_div_round_ps_rd_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_rd_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vdivps {rd-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 1)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_div_round_ps_ru_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_ru_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vdivps {ru-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 2)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_div_round_ps_rz_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_rz_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vdivps {rz-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 3)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_div_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_div_round_ps_current:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vdivps %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   ret <16 x float> %1
 }
 declare <16 x float> @llvm.x86.avx512.div.ps.512(<16 x float>, <16 x float>, i32)
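
; As the CHECK lines above illustrate, the trailing i32 argument of these
; round intrinsics selects the embedded rounding control:
;
;   i32 0 --> {rn-sae}    i32 1 --> {rd-sae}
;   i32 2 --> {ru-sae}    i32 3 --> {rz-sae}
;   i32 4 --> current rounding mode (no suffix)
;
; For the SAE-only min/max intrinsics below, i32 8 selects the {sae} form.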
 
 define <16 x float> @test_mm512_maskz_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_min_round_ps_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_min_round_ps_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_min_round_ps_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1765,11 +2126,17 @@ define <16 x float> @test_mm512_maskz_mi
 }
 
 define <16 x float> @test_mm512_maskz_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_min_round_ps_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_min_round_ps_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_min_round_ps_current:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vminps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1777,12 +2144,19 @@ define <16 x float> @test_mm512_maskz_mi
 }
 
 define <16 x float> @test_mm512_mask_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_min_round_ps_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_min_round_ps_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_min_round_ps_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1790,12 +2164,19 @@ define <16 x float> @test_mm512_mask_min
 }
 
 define <16 x float> @test_mm512_mask_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_min_round_ps_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vminps %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_min_round_ps_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vminps %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_min_round_ps_current:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vminps %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1804,29 +2185,35 @@ define <16 x float> @test_mm512_mask_min
 
 define <16 x float> @test_mm512_min_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_min_round_ps_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vminps {sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_min_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_min_round_ps_current:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vminps %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   ret <16 x float> %1
 }
 declare <16 x float> @llvm.x86.avx512.min.ps.512(<16 x float>, <16 x float>, i32)
 
 define <16 x float> @test_mm512_maskz_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_max_round_ps_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_max_round_ps_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_max_round_ps_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1834,11 +2221,17 @@ define <16 x float> @test_mm512_maskz_ma
 }
 
 define <16 x float> @test_mm512_maskz_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
-; CHECK-LABEL: test_mm512_maskz_max_round_ps_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_maskz_max_round_ps_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_maskz_max_round_ps_current:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmaxps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -1846,12 +2239,19 @@ define <16 x float> @test_mm512_maskz_ma
 }
 
 define <16 x float> @test_mm512_mask_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_max_round_ps_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_max_round_ps_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_max_round_ps_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1859,12 +2259,19 @@ define <16 x float> @test_mm512_mask_max
 }
 
 define <16 x float> @test_mm512_mask_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %src, i16 %mask) {
-; CHECK-LABEL: test_mm512_mask_max_round_ps_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxps %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mm512_mask_max_round_ps_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxps %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mm512_mask_max_round_ps_current:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmaxps %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   %2 = bitcast i16 %mask to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %src
@@ -1873,18 +2280,18 @@ define <16 x float> @test_mm512_mask_max
 
 define <16 x float> @test_mm512_max_round_ps_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_max_round_ps_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxps {sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 8)
   ret <16 x float> %1
 }
 
 define <16 x float> @test_mm512_max_round_ps_current(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_max_round_ps_current:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxps %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %1 = call <16 x float> @llvm.x86.avx512.max.ps.512(<16 x float> %a0, <16 x float> %a1, i32 4)
   ret <16 x float> %1
 }
@@ -1893,86 +2300,142 @@ declare <16 x float> @llvm.x86.avx512.ma
 declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
 
 define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_ss_rn:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_ss_rn:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovaps %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_ss_rn:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovaps %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 0)
   ret <4 x float> %res
 }
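
; Note how the i8-mask tests differ from the i16-mask tests above on the
; 32-bit target: a 16-bit mask can be kmovw'd directly from its stack slot,
; whereas an 8-bit mask is first zero-extended with movzbl and only then
; moved into %k1.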
 
 define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_ss_rd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_ss_rd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovaps %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_ss_rd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovaps %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 1)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_ss_ru:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_ss_ru:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovaps %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_ss_ru:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovaps %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 2)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_ss_rz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_ss_rz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovaps %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_ss_rz:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovaps %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 3)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_ss_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddss %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_ss_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddss %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovaps %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_ss_current:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddss %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovaps %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_add_ss_rn:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_add_ss_rn:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_add_ss_rn:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 0)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_add_ss_rn:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddss {rn-sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 0)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_ss_current_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vaddss (%rdi), %xmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vmovaps %xmm1, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_ss_current_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vaddss (%rdi), %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovaps %xmm1, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_ss_current_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vaddss (%eax), %xmm0, %xmm1 {%k1}
+; X86-NEXT:    vmovaps %xmm1, %xmm0
+; X86-NEXT:    retl
   %a1.val = load float, float* %a1
   %a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
   %a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
@@ -1983,11 +2446,19 @@ define <4 x float> @test_mask_add_ss_cur
 }
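
; In the *_memfold tests the scalar load is folded into the arithmetic
; instruction on both targets: x86-64 folds (%rdi) directly, while the
; 32-bit target first loads the pointer from its stack slot into %eax and
; then folds (%eax).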
 
 define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_add_ss_current_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_add_ss_current_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_add_ss_current_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vaddss (%eax), %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %a1.val = load float, float* %a1
   %a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
   %a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
@@ -2000,86 +2471,142 @@ define <4 x float> @test_maskz_add_ss_cu
 declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
 
 define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_sd_rn:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovapd %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_sd_rn:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovapd %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_sd_rn:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovapd %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 0)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_sd_rd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovapd %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_sd_rd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovapd %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_sd_rd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovapd %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 1)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_sd_ru:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovapd %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_sd_ru:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovapd %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_sd_ru:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovapd %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 2)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_sd_rz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovapd %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_sd_rz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovapd %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_sd_rz:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovapd %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 3)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_sd_current:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddsd %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovapd %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_sd_current:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddsd %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovapd %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_sd_current:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddsd %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovapd %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_add_sd_rn:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_add_sd_rn:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_add_sd_rn:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 0)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_add_sd_rn:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 0)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_add_sd_current_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vaddsd (%rdi), %xmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vmovapd %xmm1, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_add_sd_current_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vaddsd (%rdi), %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovapd %xmm1, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_add_sd_current_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vaddsd (%eax), %xmm0, %xmm1 {%k1}
+; X86-NEXT:    vmovapd %xmm1, %xmm0
+; X86-NEXT:    retl
   %a1.val = load double, double* %a1
   %a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
   %a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
@@ -2088,11 +2615,19 @@ define <2 x double> @test_mask_add_sd_cu
 }
 
 define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_add_sd_current_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vaddsd (%rdi), %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_add_sd_current_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vaddsd (%rdi), %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_add_sd_current_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vaddsd (%eax), %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %a1.val = load double, double* %a1
   %a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
   %a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
@@ -2103,72 +2638,111 @@ define <2 x double> @test_maskz_add_sd_c
 declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
 
 define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_max_ss_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_max_ss_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovaps %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_max_ss_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovaps %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_max_ss_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_max_ss_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_max_ss_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 8)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_max_ss_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxss {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 8)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_max_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxss %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovaps %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_max_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxss %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovaps %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_max_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmaxss %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovaps %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_max_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_max_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_max_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 4)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_max_ss:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 -1, i32 4)
   ret <4 x float> %res
 }
 
 define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_max_ss_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmaxss (%rdi), %xmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vmovaps %xmm1, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_max_ss_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmaxss (%rdi), %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovaps %xmm1, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_max_ss_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmaxss (%eax), %xmm0, %xmm1 {%k1}
+; X86-NEXT:    vmovaps %xmm1, %xmm0
+; X86-NEXT:    retl
   %a1.val = load float, float* %a1
   %a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
   %a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
@@ -2179,11 +2753,19 @@ define <4 x float> @test_mask_max_ss_mem
 }
 
 define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_max_ss_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_max_ss_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_max_ss_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmaxss (%eax), %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %a1.val = load float, float* %a1
   %a1v0 = insertelement <4 x float> undef, float %a1.val, i32 0
   %a1v1 = insertelement <4 x float> %a1v0, float 0.000000e+00, i32 1
@@ -2195,72 +2777,111 @@ define <4 x float> @test_maskz_max_ss_me
 declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
 
 define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_max_sd_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovapd %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_max_sd_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovapd %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_max_sd_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovapd %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_max_sd_sae:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_max_sd_sae:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_max_sd_sae:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 8)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_max_sd_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxsd {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 8)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_max_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vmovapd %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_max_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vmovapd %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_max_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vmovapd %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_max_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_max_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_max_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 4)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_max_sd:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 -1, i32 4)
   ret <2 x double> %res
 }
 
 define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_mask_max_sd_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmaxsd (%rdi), %xmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vmovapd %xmm1, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_mask_max_sd_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmaxsd (%rdi), %xmm0, %xmm1 {%k1}
+; X64-NEXT:    vmovapd %xmm1, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_mask_max_sd_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmaxsd (%eax), %xmm0, %xmm1 {%k1}
+; X86-NEXT:    vmovapd %xmm1, %xmm0
+; X86-NEXT:    retl
   %a1.val = load double, double* %a1
   %a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
   %a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
@@ -2269,11 +2890,19 @@ define <2 x double> @test_mask_max_sd_me
 }
 
 define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
-; CHECK-LABEL: test_maskz_max_sd_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmaxsd (%rdi), %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_maskz_max_sd_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmaxsd (%rdi), %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_maskz_max_sd_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vmaxsd (%eax), %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    retl
   %a1.val = load double, double* %a1
   %a1v0 = insertelement <2 x double> undef, double %a1.val, i32 0
   %a1v = insertelement <2 x double> %a1v0, double 0.000000e+00, i32 1
@@ -2281,133 +2910,107 @@ define <2 x double> @test_maskz_max_sd_m
   ret <2 x double> %res
 }
 
-define <2 x double> @test_x86_avx512_cvtsi2sd64(<2 x double> %a, i64 %b) {
-; CHECK-LABEL: test_x86_avx512_cvtsi2sd64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2sdq %rdi, {rz-sae}, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double> %a, i64 %b, i32 3) ; <<2 x double>> [#uses=1]
-  ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.avx512.cvtsi2sd64(<2 x double>, i64, i32) nounwind readnone
-
 define <4 x float> @test_x86_avx512_cvtsi2ss32(<4 x float> %a, i32 %b) {
-; CHECK-LABEL: test_x86_avx512_cvtsi2ss32:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2ssl %edi, {rz-sae}, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_cvtsi2ss32:
+; X64:       # %bb.0:
+; X64-NEXT:    vcvtsi2ssl %edi, {rz-sae}, %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_cvtsi2ss32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vcvtsi2ssl %eax, {rz-sae}, %xmm0, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float> %a, i32 %b, i32 3) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.avx512.cvtsi2ss32(<4 x float>, i32, i32) nounwind readnone
 
-define <4 x float> @test_x86_avx512_cvtsi2ss64(<4 x float> %a, i64 %b) {
-; CHECK-LABEL: test_x86_avx512_cvtsi2ss64:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtsi2ssq %rdi, {rz-sae}, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-  %res = call <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float> %a, i64 %b, i32 3) ; <<4 x float>> [#uses=1]
-  ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.avx512.cvtsi2ss64(<4 x float>, i64, i32) nounwind readnone
-
-define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b)
-; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu32_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2ssl %edi, {rd-sae}, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-{
+define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss (<4 x float> %a, i32 %b) {
+; X64-LABEL: test_x86_avx512__mm_cvt_roundu32_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    vcvtusi2ssl %edi, {rd-sae}, %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512__mm_cvt_roundu32_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
 
-define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, i32* %ptr)
-; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    movl (%rdi), %eax
-; CHECK-NEXT:    vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-{
+define <4 x float> @test_x86_avx512__mm_cvt_roundu32_ss_mem(<4 x float> %a, i32* %ptr) {
+; X64-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512__mm_cvt_roundu32_ss_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    vcvtusi2ssl %eax, {rd-sae}, %xmm0, %xmm0
+; X86-NEXT:    retl
   %b = load i32, i32* %ptr
   %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 1) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
 
-define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b)
-; CHECK-LABEL: test_x86_avx512__mm_cvtu32_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-{
+define <4 x float> @test_x86_avx512__mm_cvtu32_ss(<4 x float> %a, i32 %b) {
+; X64-LABEL: test_x86_avx512__mm_cvtu32_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512__mm_cvtu32_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    vcvtusi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 4) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
 
-define <4 x float> @test_x86_avx512__mm_cvtu32_ss_mem(<4 x float> %a, i32* %ptr)
-; CHECK-LABEL: test_x86_avx512__mm_cvtu32_ss_mem:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2ssl (%rdi), %xmm0, %xmm0
-; CHECK-NEXT:    retq
-{
+define <4 x float> @test_x86_avx512__mm_cvtu32_ss_mem(<4 x float> %a, i32* %ptr) {
+; X64-LABEL: test_x86_avx512__mm_cvtu32_ss_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    vcvtusi2ssl (%rdi), %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512__mm_cvtu32_ss_mem:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vcvtusi2ssl (%eax), %xmm0, %xmm0
+; X86-NEXT:    retl
   %b = load i32, i32* %ptr
   %res = call <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float> %a, i32 %b, i32 4) ; <<4 x float>> [#uses=1]
   ret <4 x float> %res
 }
 declare <4 x float> @llvm.x86.avx512.cvtusi2ss(<4 x float>, i32, i32) nounwind readnone
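
; Note: with the default rounding mode (i32 4) the i32 source of vcvtusi2ssl
; can be folded straight from its stack slot on the 32-bit target, as in
; test_x86_avx512__mm_cvtu32_ss above. The {rd-sae} variants load into %eax
; first, since embedded rounding is only encodable with a register source.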
 
-define <4 x float> @_mm_cvt_roundu64_ss (<4 x float> %a, i64 %b)
-; CHECK-LABEL: _mm_cvt_roundu64_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2ssq %rdi, {rd-sae}, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-{
-  %res = call <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float> %a, i64 %b, i32 1) ; <<4 x float>> [#uses=1]
-  ret <4 x float> %res
-}
-
-define <4 x float> @_mm_cvtu64_ss(<4 x float> %a, i64 %b)
-; CHECK-LABEL: _mm_cvtu64_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2ssq %rdi, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-{
-  %res = call <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float> %a, i64 %b, i32 4) ; <<4 x float>> [#uses=1]
-  ret <4 x float> %res
-}
-declare <4 x float> @llvm.x86.avx512.cvtusi642ss(<4 x float>, i64, i32) nounwind readnone
-
-define <2 x double> @test_x86_avx512_mm_cvtu64_sd(<2 x double> %a, i64 %b)
-; CHECK-LABEL: test_x86_avx512_mm_cvtu64_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2sdq %rdi, {rd-sae}, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-{
-  %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a, i64 %b, i32 1) ; <<2 x double>> [#uses=1]
-  ret <2 x double> %res
-}
-
-define <2 x double> @test_x86_avx512__mm_cvt_roundu64_sd(<2 x double> %a, i64 %b)
-; CHECK-LABEL: test_x86_avx512__mm_cvt_roundu64_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vcvtusi2sdq %rdi, %xmm0, %xmm0
-; CHECK-NEXT:    retq
-{
-  %res = call <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double> %a, i64 %b, i32 4) ; <<2 x double>> [#uses=1]
-  ret <2 x double> %res
-}
-declare <2 x double> @llvm.x86.avx512.cvtusi642sd(<2 x double>, i64, i32) nounwind readnone
-
 declare <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32>, <16 x i32>, <16 x i32>)
 
 define <16 x i32> @test_int_x86_avx512_mask_vpermi2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm3
-; CHECK-NEXT:    vpermi2d (%rdi), %zmm0, %zmm3 {%k1}
-; CHECK-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm3, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_vpermi2var_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm3
+; X64-NEXT:    vpermi2d (%rdi), %zmm0, %zmm3 {%k1}
+; X64-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0
+; X64-NEXT:    vpaddd %zmm0, %zmm3, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_vpermi2var_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm3
+; X86-NEXT:    vpermi2d (%eax), %zmm0, %zmm3 {%k1}
+; X86-NEXT:    vpermt2d %zmm2, %zmm1, %zmm0
+; X86-NEXT:    vpaddd %zmm0, %zmm3, %zmm0
+; X86-NEXT:    retl
   %x2 = load <16 x i32>, <16 x i32>* %x2p
   %1 = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2)
   %2 = bitcast i16 %x3 to <16 x i1>
@@ -2420,14 +3023,24 @@ define <16 x i32>@test_int_x86_avx512_ma
 declare <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double>, <8 x i64>, <8 x double>)
 
 define <8 x double> @test_int_x86_avx512_mask_vpermi2var_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovapd %zmm0, %zmm3
-; CHECK-NEXT:    vpermt2pd %zmm2, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vaddpd %zmm3, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovapd %zmm0, %zmm3
+; X64-NEXT:    vpermt2pd %zmm2, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vaddpd %zmm3, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_vpermi2var_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovapd %zmm0, %zmm3
+; X86-NEXT:    vpermt2pd %zmm2, %zmm1, %zmm3
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpermi2pd %zmm2, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vaddpd %zmm3, %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2)
   %2 = bitcast <8 x i64> %x1 to <8 x double>
   %3 = bitcast i8 %x3 to <8 x i1>
@@ -2441,14 +3054,23 @@ define <8 x double>@test_int_x86_avx512_
 declare <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float>, <16 x i32>, <16 x float>)
 
 define <16 x float>@test_int_x86_avx512_mask_vpermi2var_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovaps %zmm0, %zmm3
-; CHECK-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermi2ps %zmm2, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vaddps %zmm3, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovaps %zmm0, %zmm3
+; X64-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2ps %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vaddps %zmm3, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_vpermi2var_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps %zmm0, %zmm3
+; X86-NEXT:    vpermt2ps %zmm2, %zmm1, %zmm3
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermi2ps %zmm2, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vaddps %zmm3, %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2)
   %2 = bitcast <16 x i32> %x1 to <16 x float>
   %3 = bitcast i16 %x3 to <16 x i1>
@@ -2462,14 +3084,24 @@ define <16 x float>@test_int_x86_avx512_
 declare <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64>, <8 x i64>, <8 x i64>)
 
 define <8 x i64>@test_int_x86_avx512_mask_vpermi2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vpermi2var_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm3
-; CHECK-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vpaddq %zmm3, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_vpermi2var_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X64-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vpaddq %zmm3, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_vpermi2var_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X86-NEXT:    vpermt2q %zmm2, %zmm1, %zmm3
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpermi2q %zmm2, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vpaddq %zmm3, %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2)
   %2 = bitcast i8 %x3 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x1
@@ -2479,14 +3111,24 @@ define <8 x i64>@test_int_x86_avx512_mas
 }
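A note on the shape of these vpermi2var/vpermt2var tests: the IR calls the unmasked intrinsic and expresses the masking as a plain bitcast-plus-select, which the backend folds into the {%k1} (or {%k1} {z}) predicate seen in the checks; whether vpermi2q or vpermt2q gets printed depends on which input the select ties the result back to. A minimal standalone sketch of the same pattern (the function name is hypothetical, not part of the test):

define <8 x i64> @vpermi2_mask_sketch(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %m) {
  %p = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2)
  %b = bitcast i8 %m to <8 x i1>
  ; Tying the false side of the select to %x1, the index operand, is what
  ; selects the vpermi2q form, which overwrites the index register.
  %r = select <8 x i1> %b, <8 x i64> %p, <8 x i64> %x1
  ret <8 x i64> %r
}
declare <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64>, <8 x i64>, <8 x i64>)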
 
 define <16 x i32>@test_int_x86_avx512_maskz_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm2
-; CHECK-NEXT:    vpermt2d (%rdi), %zmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT:    vpermt2d %zmm1, %zmm0, %zmm1
-; CHECK-NEXT:    vpaddd %zmm1, %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm2
+; X64-NEXT:    vpermt2d (%rdi), %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT:    vpermt2d %zmm1, %zmm0, %zmm1
+; X64-NEXT:    vpaddd %zmm1, %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_vpermt2var_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm2
+; X86-NEXT:    vpermt2d (%eax), %zmm0, %zmm2 {%k1} {z}
+; X86-NEXT:    vpermt2d %zmm1, %zmm0, %zmm1
+; X86-NEXT:    vpaddd %zmm1, %zmm2, %zmm0
+; X86-NEXT:    retl
   %x2 = load <16 x i32>, <16 x i32>* %x2p
   %1 = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2)
   %2 = bitcast i16 %x3 to <16 x i1>
@@ -2497,14 +3139,25 @@ define <16 x i32>@test_int_x86_avx512_ma
 }
 
 define <8 x double>@test_int_x86_avx512_maskz_vpermt2var_pd_512(<8 x i64> %x0, <8 x double> %x1, double* %x2ptr, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vmovapd %zmm1, %zmm2
-; CHECK-NEXT:    vpermt2pd (%rdi){1to8}, %zmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT:    vpermt2pd %zmm1, %zmm0, %zmm1
-; CHECK-NEXT:    vaddpd %zmm1, %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_vpermt2var_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vmovapd %zmm1, %zmm2
+; X64-NEXT:    vpermt2pd (%rdi){1to8}, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT:    vpermt2pd %zmm1, %zmm0, %zmm1
+; X64-NEXT:    vaddpd %zmm1, %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_vpermt2var_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    kmovw %ecx, %k1
+; X86-NEXT:    vmovapd %zmm1, %zmm2
+; X86-NEXT:    vpermt2pd (%eax){1to8}, %zmm0, %zmm2 {%k1} {z}
+; X86-NEXT:    vpermt2pd %zmm1, %zmm0, %zmm1
+; X86-NEXT:    vaddpd %zmm1, %zmm2, %zmm0
+; X86-NEXT:    retl
   %x2s = load double, double* %x2ptr
   %x2ins = insertelement <8 x double> undef, double %x2s, i32 0
   %x2 = shufflevector <8 x double> %x2ins, <8 x double> undef, <8 x i32> zeroinitializer
@@ -2517,14 +3170,23 @@ define <8 x double>@test_int_x86_avx512_
 }
 
 define <16 x float>@test_int_x86_avx512_maskz_vpermt2var_ps_512(<16 x i32> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovaps %zmm1, %zmm3
-; CHECK-NEXT:    vpermt2ps %zmm2, %zmm0, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermt2ps %zmm2, %zmm0, %zmm1 {%k1} {z}
-; CHECK-NEXT:    vaddps %zmm3, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_vpermt2var_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovaps %zmm1, %zmm3
+; X64-NEXT:    vpermt2ps %zmm2, %zmm0, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2ps %zmm2, %zmm0, %zmm1 {%k1} {z}
+; X64-NEXT:    vaddps %zmm3, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_vpermt2var_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovaps %zmm1, %zmm3
+; X86-NEXT:    vpermt2ps %zmm2, %zmm0, %zmm3
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermt2ps %zmm2, %zmm0, %zmm1 {%k1} {z}
+; X86-NEXT:    vaddps %zmm3, %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.vpermi2var.ps.512(<16 x float> %x1, <16 x i32> %x0, <16 x float> %x2)
   %2 = bitcast i16 %x3 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> zeroinitializer
@@ -2534,14 +3196,24 @@ define <16 x float>@test_int_x86_avx512_
 }
 
 define <8 x i64>@test_int_x86_avx512_maskz_vpermt2var_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_vpermt2var_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm3
-; CHECK-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1 {%k1} {z}
-; CHECK-NEXT:    vpaddq %zmm3, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_vpermt2var_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm3
+; X64-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1 {%k1} {z}
+; X64-NEXT:    vpaddq %zmm3, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_vpermt2var_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm3
+; X86-NEXT:    vpermt2q %zmm2, %zmm0, %zmm3
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpermt2q %zmm2, %zmm0, %zmm1 {%k1} {z}
+; X86-NEXT:    vpaddq %zmm3, %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <8 x i64> @llvm.x86.avx512.vpermi2var.q.512(<8 x i64> %x1, <8 x i64> %x0, <8 x i64> %x2)
   %2 = bitcast i8 %x3 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> zeroinitializer
@@ -2551,14 +3223,23 @@ define <8 x i64>@test_int_x86_avx512_mas
 }
 
 define <16 x i32>@test_int_x86_avx512_mask_vpermt2var_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vpermt2var_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm3
-; CHECK-NEXT:    vpermt2d %zmm2, %zmm0, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vpaddd %zmm3, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_vpermt2var_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm3
+; X64-NEXT:    vpermt2d %zmm2, %zmm0, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vpaddd %zmm3, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_vpermt2var_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm3
+; X86-NEXT:    vpermt2d %zmm2, %zmm0, %zmm3
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermt2d %zmm2, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vpaddd %zmm3, %zmm1, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x i32> @llvm.x86.avx512.vpermi2var.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2)
   %2 = bitcast i16 %x3 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x1
@@ -2569,13 +3250,22 @@ define <16 x i32>@test_int_x86_avx512_ma
 
 declare <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
 define <8 x double>@test_int_x86_avx512_mask_scalef_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_scalef_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vscalefpd {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vscalefpd {rn-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_scalef_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vscalefpd {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vscalefpd {rn-sae}, %zmm1, %zmm0, %zmm0
+; X64-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_scalef_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vscalefpd {rz-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vscalefpd {rn-sae}, %zmm1, %zmm0, %zmm0
+; X86-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 3)
   %res1 = call <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
   %res2 = fadd <8 x double> %res, %res1
@@ -2584,13 +3274,21 @@ define <8 x double>@test_int_x86_avx512_
 
 declare <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
 define <16 x float>@test_int_x86_avx512_mask_scalef_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_scalef_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vscalefps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vscalefps {rn-sae}, %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    vaddps %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_scalef_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vscalefps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vscalefps {rn-sae}, %zmm1, %zmm0, %zmm0
+; X64-NEXT:    vaddps %zmm0, %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_scalef_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vscalefps {ru-sae}, %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vscalefps {rn-sae}, %zmm1, %zmm0, %zmm0
+; X86-NEXT:    vaddps %zmm0, %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 2)
   %res1 = call <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0)
   %res2 = fadd <16 x float> %res, %res1
@@ -2600,16 +3298,28 @@ define <16 x float>@test_int_x86_avx512_
 declare <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64>, <16 x i8>, i8)
 
 define <16 x i8>@test_int_x86_avx512_mask_pmov_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovqb %zmm0, %xmm2
-; CHECK-NEXT:    vpmovqb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpmovqb %zmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_qb_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovqb %zmm0, %xmm2
+; X64-NEXT:    vpmovqb %zmm0, %xmm1 {%k1}
+; X64-NEXT:    vpmovqb %zmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_qb_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpmovqb %zmm0, %xmm2
+; X86-NEXT:    vpmovqb %zmm0, %xmm1 {%k1}
+; X86-NEXT:    vpmovqb %zmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2)
     %res2 = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.512(<8 x i64> %x0, <16 x i8> zeroinitializer, i8 %x2)
@@ -2621,13 +3331,23 @@ define <16 x i8>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmov.qb.mem.512(i8* %ptr, <8 x i64>, i8)
 
 define void @test_int_x86_avx512_mask_pmov_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qb_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovqb %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovqb %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_qb_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovqb %zmm0, (%rdi)
+; X64-NEXT:    vpmovqb %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_qb_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovqb %zmm0, (%eax)
+; X86-NEXT:    vpmovqb %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmov.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
     call void @llvm.x86.avx512.mask.pmov.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
     ret void
@@ -2636,16 +3356,28 @@ define void @test_int_x86_avx512_mask_pm
 declare <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64>, <16 x i8>, i8)
 
 define <16 x i8>@test_int_x86_avx512_mask_pmovs_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovsqb %zmm0, %xmm2
-; CHECK-NEXT:    vpmovsqb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpmovsqb %zmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_qb_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovsqb %zmm0, %xmm2
+; X64-NEXT:    vpmovsqb %zmm0, %xmm1 {%k1}
+; X64-NEXT:    vpmovsqb %zmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_qb_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpmovsqb %zmm0, %xmm2
+; X86-NEXT:    vpmovsqb %zmm0, %xmm1 {%k1}
+; X86-NEXT:    vpmovsqb %zmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2)
     %res2 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.qb.512(<8 x i64> %x0, <16 x i8> zeroinitializer, i8 %x2)
@@ -2657,13 +3389,23 @@ define <16 x i8>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmovs.qb.mem.512(i8* %ptr, <8 x i64>, i8)
 
 define void @test_int_x86_avx512_mask_pmovs_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qb_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovsqb %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovsqb %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_qb_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovsqb %zmm0, (%rdi)
+; X64-NEXT:    vpmovsqb %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_qb_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsqb %zmm0, (%eax)
+; X86-NEXT:    vpmovsqb %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovs.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
     call void @llvm.x86.avx512.mask.pmovs.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
     ret void
@@ -2672,16 +3414,28 @@ define void @test_int_x86_avx512_mask_pm
 declare <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64>, <16 x i8>, i8)
 
 define <16 x i8>@test_int_x86_avx512_mask_pmovus_qb_512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovusqb %zmm0, %xmm2
-; CHECK-NEXT:    vpmovusqb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpmovusqb %zmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_qb_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovusqb %zmm0, %xmm2
+; X64-NEXT:    vpmovusqb %zmm0, %xmm1 {%k1}
+; X64-NEXT:    vpmovusqb %zmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_qb_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpmovusqb %zmm0, %xmm2
+; X86-NEXT:    vpmovusqb %zmm0, %xmm1 {%k1}
+; X86-NEXT:    vpmovusqb %zmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64> %x0, <16 x i8> %x1, i8 %x2)
     %res2 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.qb.512(<8 x i64> %x0, <16 x i8> zeroinitializer, i8 %x2)
@@ -2693,13 +3447,23 @@ define <16 x i8>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmovus.qb.mem.512(i8* %ptr, <8 x i64>, i8)
 
 define void @test_int_x86_avx512_mask_pmovus_qb_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qb_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovusqb %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovusqb %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_qb_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovusqb %zmm0, (%rdi)
+; X64-NEXT:    vpmovusqb %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_qb_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovusqb %zmm0, (%eax)
+; X86-NEXT:    vpmovusqb %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovus.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
     call void @llvm.x86.avx512.mask.pmovus.qb.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
     ret void
@@ -2708,16 +3472,28 @@ define void @test_int_x86_avx512_mask_pm
 declare <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64>, <8 x i16>, i8)
 
 define <8 x i16>@test_int_x86_avx512_mask_pmov_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovqw %zmm0, %xmm2
-; CHECK-NEXT:    vpmovqw %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpmovqw %zmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_qw_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovqw %zmm0, %xmm2
+; X64-NEXT:    vpmovqw %zmm0, %xmm1 {%k1}
+; X64-NEXT:    vpmovqw %zmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_qw_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpmovqw %zmm0, %xmm2
+; X86-NEXT:    vpmovqw %zmm0, %xmm1 {%k1}
+; X86-NEXT:    vpmovqw %zmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 -1)
     %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2)
     %res2 = call <8 x i16> @llvm.x86.avx512.mask.pmov.qw.512(<8 x i64> %x0, <8 x i16> zeroinitializer, i8 %x2)
@@ -2729,13 +3505,23 @@ define <8 x i16>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmov.qw.mem.512(i8* %ptr, <8 x i64>, i8)
 
 define void @test_int_x86_avx512_mask_pmov_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qw_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovqw %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovqw %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_qw_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovqw %zmm0, (%rdi)
+; X64-NEXT:    vpmovqw %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_qw_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovqw %zmm0, (%eax)
+; X86-NEXT:    vpmovqw %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmov.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
     call void @llvm.x86.avx512.mask.pmov.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
     ret void
@@ -2744,16 +3530,28 @@ define void @test_int_x86_avx512_mask_pm
 declare <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64>, <8 x i16>, i8)
 
 define <8 x i16>@test_int_x86_avx512_mask_pmovs_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovsqw %zmm0, %xmm2
-; CHECK-NEXT:    vpmovsqw %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpmovsqw %zmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_qw_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovsqw %zmm0, %xmm2
+; X64-NEXT:    vpmovsqw %zmm0, %xmm1 {%k1}
+; X64-NEXT:    vpmovsqw %zmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_qw_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpmovsqw %zmm0, %xmm2
+; X86-NEXT:    vpmovsqw %zmm0, %xmm1 {%k1}
+; X86-NEXT:    vpmovsqw %zmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 -1)
     %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2)
     %res2 = call <8 x i16> @llvm.x86.avx512.mask.pmovs.qw.512(<8 x i64> %x0, <8 x i16> zeroinitializer, i8 %x2)
@@ -2765,13 +3563,23 @@ define <8 x i16>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmovs.qw.mem.512(i8* %ptr, <8 x i64>, i8)
 
 define void @test_int_x86_avx512_mask_pmovs_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qw_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovsqw %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovsqw %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_qw_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovsqw %zmm0, (%rdi)
+; X64-NEXT:    vpmovsqw %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_qw_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsqw %zmm0, (%eax)
+; X86-NEXT:    vpmovsqw %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovs.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
     call void @llvm.x86.avx512.mask.pmovs.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
     ret void
@@ -2780,16 +3588,28 @@ define void @test_int_x86_avx512_mask_pm
 declare <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64>, <8 x i16>, i8)
 
 define <8 x i16>@test_int_x86_avx512_mask_pmovus_qw_512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovusqw %zmm0, %xmm2
-; CHECK-NEXT:    vpmovusqw %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpmovusqw %zmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_qw_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovusqw %zmm0, %xmm2
+; X64-NEXT:    vpmovusqw %zmm0, %xmm1 {%k1}
+; X64-NEXT:    vpmovusqw %zmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_qw_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpmovusqw %zmm0, %xmm2
+; X86-NEXT:    vpmovusqw %zmm0, %xmm1 {%k1}
+; X86-NEXT:    vpmovusqw %zmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; X86-NEXT:    vpaddw %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     %res0 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 -1)
     %res1 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64> %x0, <8 x i16> %x1, i8 %x2)
     %res2 = call <8 x i16> @llvm.x86.avx512.mask.pmovus.qw.512(<8 x i64> %x0, <8 x i16> zeroinitializer, i8 %x2)
@@ -2801,28 +3621,49 @@ define <8 x i16>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmovus.qw.mem.512(i8* %ptr, <8 x i64>, i8)
 
 define void @test_int_x86_avx512_mask_pmovus_qw_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qw_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovusqw %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovusqw %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_qw_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovusqw %zmm0, (%rdi)
+; X64-NEXT:    vpmovusqw %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_qw_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovusqw %zmm0, (%eax)
+; X86-NEXT:    vpmovusqw %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovus.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
     call void @llvm.x86.avx512.mask.pmovus.qw.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
     ret void
 }
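The X86 bodies above also make the 32-bit calling convention visible: an i8 mask argument arrives in a stack slot and AVX512F has no byte-granularity k-register move, so it is widened with movzbl and then transferred with kmovw, while the i16 masks in the dword tests further down can be kmovw'd straight off the stack. A minimal sketch of an i8-masked operation showing the IR side of that lowering (hypothetical function, not part of the test file):

define <8 x i64> @mask8_sketch(<8 x i64> %v, i8 %m) {
  ; On the X86 command line %m would be loaded with movzbl before kmovw.
  %b = bitcast i8 %m to <8 x i1>
  %r = select <8 x i1> %b, <8 x i64> %v, <8 x i64> zeroinitializer
  ret <8 x i64> %r
}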
 
 define <8 x i32>@test_int_x86_avx512_mask_pmov_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpmovqd %zmm0, %ymm2
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovqd %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpmovqd %zmm0, %ymm0 {%k1} {z}
-; CHECK-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_qd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vpmovqd %zmm0, %ymm2
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovqd %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vpmovqd %zmm0, %ymm0 {%k1} {z}
+; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X64-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_qd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vpmovqd %zmm0, %ymm2
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpmovqd %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vpmovqd %zmm0, %ymm0 {%k1} {z}
+; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X86-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; X86-NEXT:    retl
   %1 = trunc <8 x i64> %x0 to <8 x i32>
   %2 = trunc <8 x i64> %x0 to <8 x i32>
   %3 = bitcast i8 %x2 to <8 x i1>
@@ -2838,13 +3679,23 @@ define <8 x i32>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmov.qd.mem.512(i8* %ptr, <8 x i64>, i8)
 
 define void @test_int_x86_avx512_mask_pmov_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_qd_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovqd %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovqd %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_qd_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovqd %zmm0, (%rdi)
+; X64-NEXT:    vpmovqd %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_qd_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovqd %zmm0, (%eax)
+; X86-NEXT:    vpmovqd %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmov.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
     call void @llvm.x86.avx512.mask.pmov.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
     ret void
@@ -2853,15 +3704,26 @@ define void @test_int_x86_avx512_mask_pm
 declare <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64>, <8 x i32>, i8)
 
 define <8 x i32>@test_int_x86_avx512_mask_pmovs_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovsqd %zmm0, %ymm2 {%k1} {z}
-; CHECK-NEXT:    vpmovsqd %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpaddd %ymm2, %ymm1, %ymm1
-; CHECK-NEXT:    vpmovsqd %zmm0, %ymm0
-; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_qd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovsqd %zmm0, %ymm2 {%k1} {z}
+; X64-NEXT:    vpmovsqd %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vpaddd %ymm2, %ymm1, %ymm1
+; X64-NEXT:    vpmovsqd %zmm0, %ymm0
+; X64-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_qd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpmovsqd %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vpmovsqd %zmm0, %ymm2 {%k1} {z}
+; X86-NEXT:    vpaddd %ymm2, %ymm1, %ymm1
+; X86-NEXT:    vpmovsqd %zmm0, %ymm0
+; X86-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; X86-NEXT:    retl
     %res0 = call <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64> %x0, <8 x i32> %x1, i8 -1)
     %res1 = call <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2)
     %res2 = call <8 x i32> @llvm.x86.avx512.mask.pmovs.qd.512(<8 x i64> %x0, <8 x i32> zeroinitializer, i8 %x2)
@@ -2873,13 +3735,23 @@ define <8 x i32>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmovs.qd.mem.512(i8* %ptr, <8 x i64>, i8)
 
 define void @test_int_x86_avx512_mask_pmovs_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_qd_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovsqd %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovsqd %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_qd_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovsqd %zmm0, (%rdi)
+; X64-NEXT:    vpmovsqd %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_qd_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsqd %zmm0, (%eax)
+; X86-NEXT:    vpmovsqd %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovs.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
     call void @llvm.x86.avx512.mask.pmovs.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
     ret void
@@ -2888,15 +3760,26 @@ define void @test_int_x86_avx512_mask_pm
 declare <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64>, <8 x i32>, i8)
 
 define <8 x i32>@test_int_x86_avx512_mask_pmovus_qd_512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovusqd %zmm0, %ymm2 {%k1} {z}
-; CHECK-NEXT:    vpmovusqd %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpaddd %ymm2, %ymm1, %ymm1
-; CHECK-NEXT:    vpmovusqd %zmm0, %ymm0
-; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_qd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovusqd %zmm0, %ymm2 {%k1} {z}
+; X64-NEXT:    vpmovusqd %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vpaddd %ymm2, %ymm1, %ymm1
+; X64-NEXT:    vpmovusqd %zmm0, %ymm0
+; X64-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_qd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpmovusqd %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vpmovusqd %zmm0, %ymm2 {%k1} {z}
+; X86-NEXT:    vpaddd %ymm2, %ymm1, %ymm1
+; X86-NEXT:    vpmovusqd %zmm0, %ymm0
+; X86-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; X86-NEXT:    retl
     %res0 = call <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64> %x0, <8 x i32> %x1, i8 -1)
     %res1 = call <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64> %x0, <8 x i32> %x1, i8 %x2)
     %res2 = call <8 x i32> @llvm.x86.avx512.mask.pmovus.qd.512(<8 x i64> %x0, <8 x i32> zeroinitializer, i8 %x2)
@@ -2908,13 +3791,23 @@ define <8 x i32>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmovus.qd.mem.512(i8* %ptr, <8 x i64>, i8)
 
 define void @test_int_x86_avx512_mask_pmovus_qd_mem_512(i8* %ptr, <8 x i64> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_qd_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovusqd %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovusqd %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_qd_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovusqd %zmm0, (%rdi)
+; X64-NEXT:    vpmovusqd %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_qd_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovusqd %zmm0, (%eax)
+; X86-NEXT:    vpmovusqd %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovus.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 -1)
     call void @llvm.x86.avx512.mask.pmovus.qd.mem.512(i8* %ptr, <8 x i64> %x1, i8 %x2)
     ret void
@@ -2923,16 +3816,27 @@ define void @test_int_x86_avx512_mask_pm
 declare <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32>, <16 x i8>, i16)
 
 define <16 x i8>@test_int_x86_avx512_mask_pmov_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovdb %zmm0, %xmm2
-; CHECK-NEXT:    vpmovdb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpmovdb %zmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_db_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovdb %zmm0, %xmm2
+; X64-NEXT:    vpmovdb %zmm0, %xmm1 {%k1}
+; X64-NEXT:    vpmovdb %zmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_db_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmovdb %zmm0, %xmm2
+; X86-NEXT:    vpmovdb %zmm0, %xmm1 {%k1}
+; X86-NEXT:    vpmovdb %zmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2)
     %res2 = call <16 x i8> @llvm.x86.avx512.mask.pmov.db.512(<16 x i32> %x0, <16 x i8> zeroinitializer, i16 %x2)
@@ -2944,13 +3848,22 @@ define <16 x i8>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmov.db.mem.512(i8* %ptr, <16 x i32>, i16)
 
 define void @test_int_x86_avx512_mask_pmov_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_db_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovdb %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovdb %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_db_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovdb %zmm0, (%rdi)
+; X64-NEXT:    vpmovdb %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_db_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovdb %zmm0, (%eax)
+; X86-NEXT:    vpmovdb %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmov.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
     call void @llvm.x86.avx512.mask.pmov.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
     ret void
@@ -2959,16 +3872,27 @@ define void @test_int_x86_avx512_mask_pm
 declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32>, <16 x i8>, i16)
 
 define <16 x i8>@test_int_x86_avx512_mask_pmovs_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovsdb %zmm0, %xmm2
-; CHECK-NEXT:    vpmovsdb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpmovsdb %zmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_db_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovsdb %zmm0, %xmm2
+; X64-NEXT:    vpmovsdb %zmm0, %xmm1 {%k1}
+; X64-NEXT:    vpmovsdb %zmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_db_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmovsdb %zmm0, %xmm2
+; X86-NEXT:    vpmovsdb %zmm0, %xmm1 {%k1}
+; X86-NEXT:    vpmovsdb %zmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2)
     %res2 = call <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32> %x0, <16 x i8> zeroinitializer, i16 %x2)
@@ -2980,13 +3904,22 @@ define <16 x i8>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmovs.db.mem.512(i8* %ptr, <16 x i32>, i16)
 
 define void @test_int_x86_avx512_mask_pmovs_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_db_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovsdb %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovsdb %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_db_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovsdb %zmm0, (%rdi)
+; X64-NEXT:    vpmovsdb %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_db_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsdb %zmm0, (%eax)
+; X86-NEXT:    vpmovsdb %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovs.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
     call void @llvm.x86.avx512.mask.pmovs.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
     ret void
@@ -2995,16 +3928,27 @@ define void @test_int_x86_avx512_mask_pm
 declare <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32>, <16 x i8>, i16)
 
 define <16 x i8>@test_int_x86_avx512_mask_pmovus_db_512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovusdb %zmm0, %xmm2
-; CHECK-NEXT:    vpmovusdb %zmm0, %xmm1 {%k1}
-; CHECK-NEXT:    vpmovusdb %zmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; CHECK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_db_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovusdb %zmm0, %xmm2
+; X64-NEXT:    vpmovusdb %zmm0, %xmm1 {%k1}
+; X64-NEXT:    vpmovusdb %zmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_db_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmovusdb %zmm0, %xmm2
+; X86-NEXT:    vpmovusdb %zmm0, %xmm1 {%k1}
+; X86-NEXT:    vpmovusdb %zmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; X86-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     %res0 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 -1)
     %res1 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32> %x0, <16 x i8> %x1, i16 %x2)
     %res2 = call <16 x i8> @llvm.x86.avx512.mask.pmovus.db.512(<16 x i32> %x0, <16 x i8> zeroinitializer, i16 %x2)
@@ -3016,13 +3960,22 @@ define <16 x i8>@test_int_x86_avx512_mas
 declare void @llvm.x86.avx512.mask.pmovus.db.mem.512(i8* %ptr, <16 x i32>, i16)
 
 define void @test_int_x86_avx512_mask_pmovus_db_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_db_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovusdb %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovusdb %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_db_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovusdb %zmm0, (%rdi)
+; X64-NEXT:    vpmovusdb %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_db_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovusdb %zmm0, (%eax)
+; X86-NEXT:    vpmovusdb %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovus.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
     call void @llvm.x86.avx512.mask.pmovus.db.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
     ret void
@@ -3031,15 +3984,25 @@ define void @test_int_x86_avx512_mask_pm
 declare <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32>, <16 x i16>, i16)
 
 define <16 x i16>@test_int_x86_avx512_mask_pmov_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovdw %zmm0, %ymm2
-; CHECK-NEXT:    vpmovdw %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpmovdw %zmm0, %ymm0 {%k1} {z}
-; CHECK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_dw_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovdw %zmm0, %ymm2
+; X64-NEXT:    vpmovdw %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vpmovdw %zmm0, %ymm0 {%k1} {z}
+; X64-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; X64-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_dw_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmovdw %zmm0, %ymm2
+; X86-NEXT:    vpmovdw %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vpmovdw %zmm0, %ymm0 {%k1} {z}
+; X86-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; X86-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; X86-NEXT:    retl
     %res0 = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 -1)
     %res1 = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2)
     %res2 = call <16 x i16> @llvm.x86.avx512.mask.pmov.dw.512(<16 x i32> %x0, <16 x i16> zeroinitializer, i16 %x2)
@@ -3051,13 +4014,22 @@ define <16 x i16>@test_int_x86_avx512_ma
 declare void @llvm.x86.avx512.mask.pmov.dw.mem.512(i8* %ptr, <16 x i32>, i16)
 
 define void @test_int_x86_avx512_mask_pmov_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmov_dw_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovdw %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovdw %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmov_dw_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovdw %zmm0, (%rdi)
+; X64-NEXT:    vpmovdw %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmov_dw_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovdw %zmm0, (%eax)
+; X86-NEXT:    vpmovdw %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmov.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
     call void @llvm.x86.avx512.mask.pmov.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
     ret void
@@ -3066,15 +4038,25 @@ define void @test_int_x86_avx512_mask_pm
 declare <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32>, <16 x i16>, i16)
 
 define <16 x i16>@test_int_x86_avx512_mask_pmovs_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovsdw %zmm0, %ymm2
-; CHECK-NEXT:    vpmovsdw %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpmovsdw %zmm0, %ymm0 {%k1} {z}
-; CHECK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_dw_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovsdw %zmm0, %ymm2
+; X64-NEXT:    vpmovsdw %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vpmovsdw %zmm0, %ymm0 {%k1} {z}
+; X64-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; X64-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_dw_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmovsdw %zmm0, %ymm2
+; X86-NEXT:    vpmovsdw %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vpmovsdw %zmm0, %ymm0 {%k1} {z}
+; X86-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; X86-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; X86-NEXT:    retl
     %res0 = call <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 -1)
     %res1 = call <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2)
     %res2 = call <16 x i16> @llvm.x86.avx512.mask.pmovs.dw.512(<16 x i32> %x0, <16 x i16> zeroinitializer, i16 %x2)
@@ -3086,13 +4068,22 @@ define <16 x i16>@test_int_x86_avx512_ma
 declare void @llvm.x86.avx512.mask.pmovs.dw.mem.512(i8* %ptr, <16 x i32>, i16)
 
 define void @test_int_x86_avx512_mask_pmovs_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovs_dw_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovsdw %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovsdw %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovs_dw_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovsdw %zmm0, (%rdi)
+; X64-NEXT:    vpmovsdw %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovs_dw_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsdw %zmm0, (%eax)
+; X86-NEXT:    vpmovsdw %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovs.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
     call void @llvm.x86.avx512.mask.pmovs.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
     ret void
@@ -3101,15 +4092,25 @@ define void @test_int_x86_avx512_mask_pm
 declare <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32>, <16 x i16>, i16)
 
 define <16 x i16>@test_int_x86_avx512_mask_pmovus_dw_512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpmovusdw %zmm0, %ymm2
-; CHECK-NEXT:    vpmovusdw %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vpmovusdw %zmm0, %ymm0 {%k1} {z}
-; CHECK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_dw_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpmovusdw %zmm0, %ymm2
+; X64-NEXT:    vpmovusdw %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vpmovusdw %zmm0, %ymm0 {%k1} {z}
+; X64-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; X64-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_dw_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpmovusdw %zmm0, %ymm2
+; X86-NEXT:    vpmovusdw %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vpmovusdw %zmm0, %ymm0 {%k1} {z}
+; X86-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; X86-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; X86-NEXT:    retl
     %res0 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 -1)
     %res1 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %x0, <16 x i16> %x1, i16 %x2)
     %res2 = call <16 x i16> @llvm.x86.avx512.mask.pmovus.dw.512(<16 x i32> %x0, <16 x i16> zeroinitializer, i16 %x2)
@@ -3121,13 +4122,22 @@ define <16 x i16>@test_int_x86_avx512_ma
 declare void @llvm.x86.avx512.mask.pmovus.dw.mem.512(i8* %ptr, <16 x i32>, i16)
 
 define void @test_int_x86_avx512_mask_pmovus_dw_mem_512(i8* %ptr, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pmovus_dw_mem_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vpmovusdw %zmm0, (%rdi)
-; CHECK-NEXT:    vpmovusdw %zmm0, (%rdi) {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pmovus_dw_mem_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vpmovusdw %zmm0, (%rdi)
+; X64-NEXT:    vpmovusdw %zmm0, (%rdi) {%k1}
+; X64-NEXT:    vzeroupper
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pmovus_dw_mem_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovusdw %zmm0, (%eax)
+; X86-NEXT:    vpmovusdw %zmm0, (%eax) {%k1}
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
     call void @llvm.x86.avx512.mask.pmovus.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 -1)
     call void @llvm.x86.avx512.mask.pmovus.dw.mem.512(i8* %ptr, <16 x i32> %x1, i16 %x2)
     ret void
@@ -3136,13 +4146,21 @@ define void @test_int_x86_avx512_mask_pm
 declare <16 x float> @llvm.x86.avx512.sitofp.round.v16f32.v16i32(<16 x i32>, i32)
 
 define <16 x float>@test_int_x86_avx512_mask_cvt_dq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtdq2ps %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vcvtdq2ps {rn-sae}, %zmm0, %zmm0
-; CHECK-NEXT:    vaddps %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_dq2ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtdq2ps %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vcvtdq2ps {rn-sae}, %zmm0, %zmm0
+; X64-NEXT:    vaddps %zmm0, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_dq2ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvtdq2ps %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vcvtdq2ps {rn-sae}, %zmm0, %zmm0
+; X86-NEXT:    vaddps %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
   %cvt = sitofp <16 x i32> %x0 to <16 x float>
   %1 = bitcast i16 %x2 to <16 x i1>
   %2 = select <16 x i1> %1, <16 x float> %cvt, <16 x float> %x1
@@ -3154,13 +4172,22 @@ define <16 x float>@test_int_x86_avx512_
 declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(<8 x double>, <8 x i32>, i8, i32)
 
 define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtpd2dq %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vcvtpd2dq {rn-sae}, %zmm0, %ymm0
-; CHECK-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtpd2dq %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vcvtpd2dq {rn-sae}, %zmm0, %ymm0
+; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_pd2dq_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcvtpd2dq %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vcvtpd2dq {rn-sae}, %zmm0, %ymm0
+; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X86-NEXT:    retl
   %res = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(<8 x double> %x0, <8 x i32> %x1, i8 %x2, i32 4)
   %res1 = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(<8 x double> %x0, <8 x i32> %x1, i8 -1, i32 0)
   %res2 = add <8 x i32> %res, %res1
@@ -3170,13 +4197,22 @@ define <8 x i32>@test_int_x86_avx512_mas
 declare <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double>, <8 x float>, i8, i32)
 
 define <8 x float>@test_int_x86_avx512_mask_cvt_pd2ps_512(<8 x double> %x0, <8 x float> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtpd2ps %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vcvtpd2ps {ru-sae}, %zmm0, %ymm0
-; CHECK-NEXT:    vaddps %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_pd2ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtpd2ps %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vcvtpd2ps {ru-sae}, %zmm0, %ymm0
+; X64-NEXT:    vaddps %ymm0, %ymm1, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_pd2ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcvtpd2ps %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vcvtpd2ps {ru-sae}, %zmm0, %ymm0
+; X86-NEXT:    vaddps %ymm0, %ymm1, %ymm0
+; X86-NEXT:    retl
   %res = call <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double> %x0, <8 x float> %x1, i8 %x2, i32 4)
   %res1 = call <8 x float> @llvm.x86.avx512.mask.cvtpd2ps.512(<8 x double> %x0, <8 x float> %x1, i8 -1, i32 2)
   %res2 = fadd <8 x float> %res, %res1
@@ -3186,13 +4222,22 @@ define <8 x float>@test_int_x86_avx512_m
 declare <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double>, <8 x i32>, i8, i32)
 
 define <8 x i32>@test_int_x86_avx512_mask_cvt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtpd2udq {ru-sae}, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vcvtpd2udq {rn-sae}, %zmm0, %ymm0
-; CHECK-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtpd2udq {ru-sae}, %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vcvtpd2udq {rn-sae}, %zmm0, %ymm0
+; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_pd2udq_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcvtpd2udq {ru-sae}, %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vcvtpd2udq {rn-sae}, %zmm0, %ymm0
+; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X86-NEXT:    retl
   %res = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double> %x0, <8 x i32> %x1, i8 %x2, i32 2)
   %res1 = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2udq.512(<8 x double> %x0, <8 x i32> %x1, i8 -1, i32 0)
   %res2 = add <8 x i32> %res, %res1
@@ -3202,13 +4247,21 @@ define <8 x i32>@test_int_x86_avx512_mas
 declare <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512(<16 x float>, <16 x i32>, i16, i32)
 
 define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2dq_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtps2dq {ru-sae}, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vcvtps2dq {rn-sae}, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_ps2dq_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtps2dq {ru-sae}, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vcvtps2dq {rn-sae}, %zmm0, %zmm0
+; X64-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_ps2dq_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvtps2dq {ru-sae}, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vcvtps2dq {rn-sae}, %zmm0, %zmm0
+; X86-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512(<16 x float> %x0, <16 x i32> %x1, i16 %x2, i32 2)
   %res1 = call <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512(<16 x float> %x0, <16 x i32> %x1, i16 -1, i32 0)
   %res2 = add <16 x i32> %res, %res1
@@ -3218,13 +4271,22 @@ define <16 x i32>@test_int_x86_avx512_ma
 declare <8 x double> @llvm.x86.avx512.mask.cvtps2pd.512(<8 x float>, <8 x double>, i8, i32)
 
 define <8 x double>@test_int_x86_avx512_mask_cvt_ps2pd_512(<8 x float> %x0, <8 x double> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtps2pd %ymm0, %zmm1 {%k1}
-; CHECK-NEXT:    vcvtps2pd {sae}, %ymm0, %zmm0
-; CHECK-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_ps2pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtps2pd %ymm0, %zmm1 {%k1}
+; X64-NEXT:    vcvtps2pd {sae}, %ymm0, %zmm0
+; X64-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_ps2pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcvtps2pd %ymm0, %zmm1 {%k1}
+; X86-NEXT:    vcvtps2pd {sae}, %ymm0, %zmm0
+; X86-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x double> @llvm.x86.avx512.mask.cvtps2pd.512(<8 x float> %x0, <8 x double> %x1, i8 %x2, i32 4)
   %res1 = call <8 x double> @llvm.x86.avx512.mask.cvtps2pd.512(<8 x float> %x0, <8 x double> %x1, i8 -1, i32 8)
   %res2 = fadd <8 x double> %res, %res1
@@ -3234,13 +4296,21 @@ define <8 x double>@test_int_x86_avx512_
 declare <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float>, <16 x i32>, i16, i32)
 
 define <16 x i32>@test_int_x86_avx512_mask_cvt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ps2udq_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtps2udq {ru-sae}, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vcvtps2udq {rn-sae}, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_ps2udq_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtps2udq {ru-sae}, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vcvtps2udq {rn-sae}, %zmm0, %zmm0
+; X64-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_ps2udq_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvtps2udq {ru-sae}, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vcvtps2udq {rn-sae}, %zmm0, %zmm0
+; X86-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float> %x0, <16 x i32> %x1, i16 %x2, i32 2)
   %res1 = call <16 x i32> @llvm.x86.avx512.mask.cvtps2udq.512(<16 x float> %x0, <16 x i32> %x1, i16 -1, i32 0)
   %res2 = add <16 x i32> %res, %res1
@@ -3250,13 +4320,22 @@ define <16 x i32>@test_int_x86_avx512_ma
 declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2dq.512(<8 x double>, <8 x i32>, i8, i32)
 
 define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2dq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvttpd2dq %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vcvttpd2dq {sae}, %zmm0, %ymm0
-; CHECK-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvttpd2dq %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vcvttpd2dq {sae}, %zmm0, %ymm0
+; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvtt_pd2dq_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcvttpd2dq %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vcvttpd2dq {sae}, %zmm0, %ymm0
+; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X86-NEXT:    retl
   %res = call <8 x i32> @llvm.x86.avx512.mask.cvttpd2dq.512(<8 x double> %x0, <8 x i32> %x1, i8 %x2, i32 4)
   %res1 = call <8 x i32> @llvm.x86.avx512.mask.cvttpd2dq.512(<8 x double> %x0, <8 x i32> %x1, i8 -1, i32 8)
   %res2 = add <8 x i32> %res, %res1
@@ -3266,13 +4345,21 @@ define <8 x i32>@test_int_x86_avx512_mas
 declare <16 x float> @llvm.x86.avx512.uitofp.round.v16f32.v16i32(<16 x i32>, i32)
 
 define <16 x float>@test_int_x86_avx512_mask_cvt_udq2ps_512(<16 x i32> %x0, <16 x float> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtudq2ps %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vcvtudq2ps {rn-sae}, %zmm0, %zmm0
-; CHECK-NEXT:    vaddps %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_udq2ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtudq2ps %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vcvtudq2ps {rn-sae}, %zmm0, %zmm0
+; X64-NEXT:    vaddps %zmm0, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_udq2ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvtudq2ps %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vcvtudq2ps {rn-sae}, %zmm0, %zmm0
+; X86-NEXT:    vaddps %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
   %cvt = uitofp <16 x i32> %x0 to <16 x float>
   %1 = bitcast i16 %x2 to <16 x i1>
   %2 = select <16 x i1> %1, <16 x float> %cvt, <16 x float> %x1
@@ -3284,13 +4371,22 @@ define <16 x float>@test_int_x86_avx512_
 declare <8 x i32> @llvm.x86.avx512.mask.cvttpd2udq.512(<8 x double>, <8 x i32>, i8, i32)
 
 define <8 x i32>@test_int_x86_avx512_mask_cvtt_pd2udq_512(<8 x double> %x0, <8 x i32> %x1, i8 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvttpd2udq %zmm0, %ymm1 {%k1}
-; CHECK-NEXT:    vcvttpd2udq {sae}, %zmm0, %ymm0
-; CHECK-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvttpd2udq %zmm0, %ymm1 {%k1}
+; X64-NEXT:    vcvttpd2udq {sae}, %zmm0, %ymm0
+; X64-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvtt_pd2udq_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcvttpd2udq %zmm0, %ymm1 {%k1}
+; X86-NEXT:    vcvttpd2udq {sae}, %zmm0, %ymm0
+; X86-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; X86-NEXT:    retl
   %res = call <8 x i32> @llvm.x86.avx512.mask.cvttpd2udq.512(<8 x double> %x0, <8 x i32> %x1, i8 %x2, i32 4)
   %res1 = call <8 x i32> @llvm.x86.avx512.mask.cvttpd2udq.512(<8 x double> %x0, <8 x i32> %x1, i8 -1, i32 8)
   %res2 = add <8 x i32> %res, %res1
@@ -3300,13 +4396,21 @@ define <8 x i32>@test_int_x86_avx512_mas
 declare <16 x i32> @llvm.x86.avx512.mask.cvttps2dq.512(<16 x float>, <16 x i32>, i16, i32)
 
 define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2dq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2dq_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvttps2dq %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vcvttps2dq {sae}, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvtt_ps2dq_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvttps2dq %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vcvttps2dq {sae}, %zmm0, %zmm0
+; X64-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvtt_ps2dq_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvttps2dq %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vcvttps2dq {sae}, %zmm0, %zmm0
+; X86-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.mask.cvttps2dq.512(<16 x float> %x0, <16 x i32> %x1, i16 %x2, i32 4)
   %res1 = call <16 x i32> @llvm.x86.avx512.mask.cvttps2dq.512(<16 x float> %x0, <16 x i32> %x1, i16 -1, i32 8)
   %res2 = add <16 x i32> %res, %res1
@@ -3316,13 +4420,21 @@ define <16 x i32>@test_int_x86_avx512_ma
 declare <16 x i32> @llvm.x86.avx512.mask.cvttps2udq.512(<16 x float>, <16 x i32>, i16, i32)
 
 define <16 x i32>@test_int_x86_avx512_mask_cvtt_ps2udq_512(<16 x float> %x0, <16 x i32> %x1, i16 %x2) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvtt_ps2udq_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvttps2udq %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vcvttps2udq {sae}, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvtt_ps2udq_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvttps2udq %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vcvttps2udq {sae}, %zmm0, %zmm0
+; X64-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvtt_ps2udq_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vcvttps2udq %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vcvttps2udq {sae}, %zmm0, %zmm0
+; X86-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.mask.cvttps2udq.512(<16 x float> %x0, <16 x i32> %x1, i16 %x2, i32 4)
   %res1 = call <16 x i32> @llvm.x86.avx512.mask.cvttps2udq.512(<16 x float> %x0, <16 x i32> %x1, i16 -1, i32 8)
   %res2 = add <16 x i32> %res, %res1
@@ -3332,18 +4444,32 @@ define <16 x i32>@test_int_x86_avx512_ma
 declare <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) nounwind readnone
 
 define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
-; CHECK-LABEL: test_getexp_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %xmm2, %xmm3
-; CHECK-NEXT:    vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT:    vgetexpss {sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
-; CHECK-NEXT:    vgetexpss {sae}, %xmm1, %xmm0, %xmm5
-; CHECK-NEXT:    vaddps %xmm5, %xmm4, %xmm4
-; CHECK-NEXT:    vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddps %xmm2, %xmm3, %xmm0
-; CHECK-NEXT:    vaddps %xmm4, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_getexp_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %xmm2, %xmm3
+; X64-NEXT:    vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
+; X64-NEXT:    vgetexpss {sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
+; X64-NEXT:    vgetexpss {sae}, %xmm1, %xmm0, %xmm5
+; X64-NEXT:    vaddps %xmm5, %xmm4, %xmm4
+; X64-NEXT:    vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddps %xmm2, %xmm3, %xmm0
+; X64-NEXT:    vaddps %xmm4, %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_getexp_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovaps %xmm2, %xmm3
+; X86-NEXT:    vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
+; X86-NEXT:    vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddps %xmm2, %xmm3, %xmm2
+; X86-NEXT:    vgetexpss {sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; X86-NEXT:    vgetexpss {sae}, %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm2, %xmm0
+; X86-NEXT:    retl
   %res0 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
   %res1 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8)
   %res2 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %mask, i32 8)
@@ -3358,18 +4484,32 @@ define <4 x float> @test_getexp_ss(<4 x
 declare <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) nounwind readnone
 
 define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
-; CHECK-LABEL: test_getexp_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vgetexpsd %xmm1, %xmm0, %xmm3
-; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vgetexpsd %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT:    vgetexpsd {sae}, %xmm1, %xmm0, %xmm5 {%k1} {z}
-; CHECK-NEXT:    vaddpd %xmm3, %xmm5, %xmm3
-; CHECK-NEXT:    vgetexpsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
-; CHECK-NEXT:    vaddpd %xmm3, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_getexp_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vgetexpsd %xmm1, %xmm0, %xmm3
+; X64-NEXT:    vmovapd %xmm2, %xmm4
+; X64-NEXT:    vgetexpsd %xmm1, %xmm0, %xmm4 {%k1}
+; X64-NEXT:    vgetexpsd {sae}, %xmm1, %xmm0, %xmm5 {%k1} {z}
+; X64-NEXT:    vaddpd %xmm3, %xmm5, %xmm3
+; X64-NEXT:    vgetexpsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
+; X64-NEXT:    vaddpd %xmm3, %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_getexp_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm2, %xmm3
+; X86-NEXT:    vgetexpsd {sae}, %xmm1, %xmm0, %xmm3 {%k1}
+; X86-NEXT:    vgetexpsd {sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
+; X86-NEXT:    vgetexpsd %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddpd %xmm3, %xmm2, %xmm2
+; X86-NEXT:    vgetexpsd %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm4, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm2, %xmm0
+; X86-NEXT:    retl
   %res0 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
   %res1 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8)
   %res2 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> zeroinitializer, i8 %mask, i32 8)
@@ -3384,35 +4524,67 @@ define <2 x double> @test_getexp_sd(<2 x
 declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32)
 
 define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    ## kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cmp_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
+; X64-NEXT:    kmovw %k0, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cmp_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
+; X86-NEXT:    kmovw %k0, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NEXT:    retl
 
   %res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)
   ret i8 %res4
 }
 
 define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd_all:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcmplesd %xmm1, %xmm0, %k0
-; CHECK-NEXT:    kmovw %k0, %ecx
-; CHECK-NEXT:    vcmpunordsd {sae}, %xmm1, %xmm0, %k0
-; CHECK-NEXT:    kmovw %k0, %edx
-; CHECK-NEXT:    vcmpneqsd %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT:    kmovw %k0, %esi
-; CHECK-NEXT:    vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    orb %sil, %al
-; CHECK-NEXT:    orb %dl, %al
-; CHECK-NEXT:    orb %cl, %al
-; CHECK-NEXT:    ## kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cmp_sd_all:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcmplesd %xmm1, %xmm0, %k0
+; X64-NEXT:    kmovw %k0, %ecx
+; X64-NEXT:    vcmpunordsd {sae}, %xmm1, %xmm0, %k0
+; X64-NEXT:    kmovw %k0, %edx
+; X64-NEXT:    vcmpneqsd %xmm1, %xmm0, %k0 {%k1}
+; X64-NEXT:    kmovw %k0, %esi
+; X64-NEXT:    vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
+; X64-NEXT:    kmovw %k0, %eax
+; X64-NEXT:    orb %sil, %al
+; X64-NEXT:    orb %dl, %al
+; X64-NEXT:    orb %cl, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cmp_sd_all:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    .cfi_offset %ebx, -8
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcmplesd %xmm1, %xmm0, %k0
+; X86-NEXT:    kmovw %k0, %ecx
+; X86-NEXT:    vcmpunordsd {sae}, %xmm1, %xmm0, %k0
+; X86-NEXT:    kmovw %k0, %edx
+; X86-NEXT:    vcmpneqsd %xmm1, %xmm0, %k0 {%k1}
+; X86-NEXT:    kmovw %k0, %ebx
+; X86-NEXT:    vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
+; X86-NEXT:    kmovw %k0, %eax
+; X86-NEXT:    orb %bl, %al
+; X86-NEXT:    orb %dl, %al
+; X86-NEXT:    orb %cl, %al
+; X86-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
 
   %res1 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 2, i8 -1, i32 4)
   %res2 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 3, i8 -1, i32 8)
@@ -3428,13 +4600,22 @@ define i8@test_int_x86_avx512_mask_cmp_s
 declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
 
 define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcmpunordss %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    ## kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cmp_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcmpunordss %xmm1, %xmm0, %k0 {%k1}
+; X64-NEXT:    kmovw %k0, %eax
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cmp_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcmpunordss %xmm1, %xmm0, %k0 {%k1}
+; X86-NEXT:    kmovw %k0, %eax
+; X86-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NEXT:    retl
 
   %res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 %x3, i32 4)
   ret i8 %res2
@@ -3442,22 +4623,45 @@ define i8@test_int_x86_avx512_mask_cmp_s
 
 
 define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss_all:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcmpless %xmm1, %xmm0, %k0
-; CHECK-NEXT:    kmovw %k0, %ecx
-; CHECK-NEXT:    vcmpunordss {sae}, %xmm1, %xmm0, %k0
-; CHECK-NEXT:    kmovw %k0, %edx
-; CHECK-NEXT:    vcmpneqss %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT:    kmovw %k0, %esi
-; CHECK-NEXT:    vcmpnltss {sae}, %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    andb %sil, %al
-; CHECK-NEXT:    andb %dl, %al
-; CHECK-NEXT:    andb %cl, %al
-; CHECK-NEXT:    ## kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cmp_ss_all:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcmpless %xmm1, %xmm0, %k0
+; X64-NEXT:    kmovw %k0, %ecx
+; X64-NEXT:    vcmpunordss {sae}, %xmm1, %xmm0, %k0
+; X64-NEXT:    kmovw %k0, %edx
+; X64-NEXT:    vcmpneqss %xmm1, %xmm0, %k0 {%k1}
+; X64-NEXT:    kmovw %k0, %esi
+; X64-NEXT:    vcmpnltss {sae}, %xmm1, %xmm0, %k0 {%k1}
+; X64-NEXT:    kmovw %k0, %eax
+; X64-NEXT:    andb %sil, %al
+; X64-NEXT:    andb %dl, %al
+; X64-NEXT:    andb %cl, %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cmp_ss_all:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    .cfi_offset %ebx, -8
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcmpless %xmm1, %xmm0, %k0
+; X86-NEXT:    kmovw %k0, %ecx
+; X86-NEXT:    vcmpunordss {sae}, %xmm1, %xmm0, %k0
+; X86-NEXT:    kmovw %k0, %edx
+; X86-NEXT:    vcmpneqss %xmm1, %xmm0, %k0 {%k1}
+; X86-NEXT:    kmovw %k0, %ebx
+; X86-NEXT:    vcmpnltss {sae}, %xmm1, %xmm0, %k0 {%k1}
+; X86-NEXT:    kmovw %k0, %eax
+; X86-NEXT:    andb %bl, %al
+; X86-NEXT:    andb %dl, %al
+; X86-NEXT:    andb %cl, %al
+; X86-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NEXT:    popl %ebx
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
   %res1 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 2, i8 -1, i32 4)
   %res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 -1, i32 8)
   %res3 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 4, i8 %x3, i32 4)
@@ -3472,13 +4676,22 @@ define i8@test_int_x86_avx512_mask_cmp_s
 declare <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double>, i32, <8 x double>, i8, i32)
 
 define <8 x double>@test_int_x86_avx512_mask_getmant_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_getmant_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vgetmantpd $11, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vgetmantpd $11, {sae}, %zmm0, %zmm0
-; CHECK-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_getmant_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vgetmantpd $11, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vgetmantpd $11, {sae}, %zmm0, %zmm0
+; X64-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_getmant_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vgetmantpd $11, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vgetmantpd $11, {sae}, %zmm0, %zmm0
+; X86-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double> %x0, i32 11, <8 x double> %x2, i8 %x3, i32 4)
   %res1 = call <8 x double> @llvm.x86.avx512.mask.getmant.pd.512(<8 x double> %x0, i32 11, <8 x double> %x2, i8 -1, i32 8)
   %res2 = fadd <8 x double> %res, %res1
@@ -3488,13 +4701,21 @@ define <8 x double>@test_int_x86_avx512_
 declare <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float>, i32, <16 x float>, i16, i32)
 
 define <16 x float>@test_int_x86_avx512_mask_getmant_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vgetmantps $11, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vgetmantps $11, {sae}, %zmm0, %zmm0
-; CHECK-NEXT:    vaddps %zmm0, %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_getmant_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vgetmantps $11, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vgetmantps $11, {sae}, %zmm0, %zmm0
+; X64-NEXT:    vaddps %zmm0, %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_getmant_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vgetmantps $11, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vgetmantps $11, {sae}, %zmm0, %zmm0
+; X86-NEXT:    vaddps %zmm0, %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float> %x0, i32 11, <16 x float> %x2, i16 %x3, i32 4)
   %res1 = call <16 x float> @llvm.x86.avx512.mask.getmant.ps.512(<16 x float> %x0, i32 11, <16 x float> %x2, i16 -1, i32 8)
   %res2 = fadd <16 x float> %res, %res1
@@ -3504,18 +4725,32 @@ define <16 x float>@test_int_x86_avx512_
 declare <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double>, <2 x double>, i32, <2 x double>, i8, i32)
 
 define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_getmant_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm3
-; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm5 {%k1} {z}
-; CHECK-NEXT:    vaddpd %xmm5, %xmm4, %xmm4
-; CHECK-NEXT:    vgetmantsd $11, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddpd %xmm3, %xmm2, %xmm0
-; CHECK-NEXT:    vaddpd %xmm0, %xmm4, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_getmant_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm3
+; X64-NEXT:    vmovapd %xmm2, %xmm4
+; X64-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1}
+; X64-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm5 {%k1} {z}
+; X64-NEXT:    vaddpd %xmm5, %xmm4, %xmm4
+; X64-NEXT:    vgetmantsd $11, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddpd %xmm3, %xmm2, %xmm0
+; X64-NEXT:    vaddpd %xmm0, %xmm4, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_getmant_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm2, %xmm3
+; X86-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm3 {%k1}
+; X86-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
+; X86-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
+; X86-NEXT:    vgetmantsd $11, %xmm1, %xmm0, %xmm4
+; X86-NEXT:    vgetmantsd $11, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddpd %xmm4, %xmm2, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %res  = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> %x2, i8 %x3, i32 4)
   %res1 = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> zeroinitializer, i8 %x3, i32 4)
   %res2 = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> %x2, i8 %x3, i32 8)
@@ -3529,17 +4764,30 @@ define <2 x double>@test_int_x86_avx512_
 declare <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float>, <4 x float>, i32, <4 x float>, i8, i32)
 
 define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm3
-; CHECK-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
-; CHECK-NEXT:    vaddps %xmm4, %xmm2, %xmm2
-; CHECK-NEXT:    vgetmantss $11, {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vaddps %xmm3, %xmm0, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_getmant_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm3
+; X64-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
+; X64-NEXT:    vaddps %xmm4, %xmm2, %xmm2
+; X64-NEXT:    vgetmantss $11, {sae}, %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vaddps %xmm3, %xmm0, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_getmant_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; X86-NEXT:    vaddps %xmm3, %xmm2, %xmm2
+; X86-NEXT:    vgetmantss $11, %xmm1, %xmm0, %xmm3
+; X86-NEXT:    vgetmantss $11, {sae}, %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vaddps %xmm3, %xmm0, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm2, %xmm0
+; X86-NEXT:    retl
   %res  = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> %x2, i8 %x3, i32 4)
   %res1 = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> zeroinitializer, i8 %x3, i32 4)
   %res2 = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> %x2, i8 -1, i32 8)
@@ -3554,20 +4802,28 @@ declare <8 x double> @llvm.x86.avx512.vp
 
 define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermilpd %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1)
   ret <8 x double> %res
 }
 
 define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_mask(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %mask) {
-; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512_mask:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovapd %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_vpermilvar_pd_512_mask:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovapd %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_vpermilvar_pd_512_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovapd %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1)
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x double> %res, <8 x double> %x2
@@ -3575,11 +4831,18 @@ define <8 x double>@test_int_x86_avx512_
 }
 
 define <8 x double>@test_int_x86_avx512_vpermilvar_pd_512_maskz(<8 x double> %x0, <8 x i64> %x1, i8 %mask) {
-; CHECK-LABEL: test_int_x86_avx512_vpermilvar_pd_512_maskz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermilpd %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_vpermilvar_pd_512_maskz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermilpd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_vpermilvar_pd_512_maskz:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpermilpd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x double> @llvm.x86.avx512.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1)
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x double> %res, <8 x double> zeroinitializer
@@ -3590,20 +4853,27 @@ declare <16 x float> @llvm.x86.avx512.vp
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermilps %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1)
   ret <16 x float> %res
 }
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) {
-; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_mask:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermilps %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_vpermilvar_ps_512_mask:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermilps %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_vpermilvar_ps_512_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermilps %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x float> %res, <16 x float> %x2
@@ -3611,11 +4881,17 @@ define <16 x float>@test_int_x86_avx512_
 }
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) {
-; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_maskz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermilps %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_vpermilvar_ps_512_maskz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermilps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_vpermilvar_ps_512_maskz:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermilps %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x float> %res, <16 x float> zeroinitializer
@@ -3625,20 +4901,27 @@ define <16 x float>@test_int_x86_avx512_
 ; Test case to make sure we can print shuffle decode comments for constant pool loads.
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool(<16 x float> %x0, <16 x i32> %x1) {
 ; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
   ret <16 x float> %res
 }
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %mask) {
-; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermilps {{.*#+}} zmm2 {%k1} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
-; CHECK-NEXT:    vmovaps %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermilps {{.*#+}} zmm2 {%k1} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
+; X64-NEXT:    vmovaps %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_mask:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermilps {{.*#+}} zmm2 {%k1} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x float> %res, <16 x float> %x2
@@ -3646,11 +4929,17 @@ define <16 x float>@test_int_x86_avx512_
 }
 
 define <16 x float>@test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz(<16 x float> %x0, <16 x i32> %x1, i16 %mask) {
-; CHECK-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_vpermilvar_ps_512_constant_pool_maskz:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x float> %res, <16 x float> zeroinitializer
@@ -3660,13 +4949,22 @@ define <16 x float>@test_int_x86_avx512_
 declare <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double>, <4 x float>, <2 x double>, i8, i32)
 
 define <2 x double>@test_int_x86_avx512_mask_cvt_ss2sd_round(<2 x double> %x0,<4 x float> %x1, <2 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vaddpd %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vaddpd %xmm0, %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double> %x0, <4 x float> %x1, <2 x double> %x2, i8 %x3, i32 4)
   %res1 = call <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double> %x0, <4 x float> %x1, <2 x double> %x2, i8 -1, i32 8)
   %res2 = fadd <2 x double> %res, %res1
@@ -3676,13 +4974,22 @@ define <2 x double>@test_int_x86_avx512_
 declare <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float>, <2 x double>, <4 x float>, i8, i32)
 
 define <4 x float>@test_int_x86_avx512_mask_cvt_sd2ss_round(<4 x float> %x0,<2 x double> %x1, <4 x float> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm2, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm2, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float> %x0, <2 x double> %x1, <4 x float> %x2, i8 %x3, i32 3)
   %res1 = call <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float> %x0, <2 x double> %x1, <4 x float> %x2, i8 -1, i32 8)
   %res2 = fadd <4 x float> %res, %res1
@@ -3692,14 +4999,23 @@ define <4 x float>@test_int_x86_avx512_m
 declare <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i32)
 
 define <16 x i32>@test_int_x86_avx512_mask_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm3
-; CHECK-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pternlog_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X64-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pternlog_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X86-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm3
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33)
   %2 = bitcast i16 %x4 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x0
@@ -3709,14 +5025,23 @@ define <16 x i32>@test_int_x86_avx512_ma
 }
 
 define <16 x i32>@test_int_x86_avx512_maskz_pternlog_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm3
-; CHECK-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_pternlog_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X64-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_pternlog_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X86-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm3
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpternlogd $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x i32> @llvm.x86.avx512.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33)
   %2 = bitcast i16 %x4 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> zeroinitializer
@@ -3728,14 +5053,24 @@ define <16 x i32>@test_int_x86_avx512_ma
 declare <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i32)
 
 define <8 x i64>@test_int_x86_avx512_mask_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_pternlog_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm3
-; CHECK-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_pternlog_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X64-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_pternlog_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X86-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm3
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
+; X86-NEXT:    retl
   %1 = call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33)
   %2 = bitcast i8 %x4 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x0
@@ -3745,14 +5080,24 @@ define <8 x i64>@test_int_x86_avx512_mas
 }
 
 define <8 x i64>@test_int_x86_avx512_maskz_pternlog_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_pternlog_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 %zmm0, %zmm3
-; CHECK-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_pternlog_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X64-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_pternlog_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 %zmm0, %zmm3
+; X86-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm3
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpternlogq $33, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
+; X86-NEXT:    retl
   %1 = call <8 x i64> @llvm.x86.avx512.pternlog.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i32 33)
   %2 = bitcast i8 %x4 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> zeroinitializer
@@ -3763,80 +5108,80 @@ define <8 x i64>@test_int_x86_avx512_mas
 
 define i32 @test_x86_avx512_comi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_comi_sd_eq_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpeqsd {sae}, %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 0, i32 8)
   ret i32 %res
 }
 
 define i32 @test_x86_avx512_ucomi_sd_eq_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_sd_eq_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpeq_uqsd {sae}, %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 8, i32 8)
   ret i32 %res
 }
 
 define i32 @test_x86_avx512_comi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_comi_sd_eq:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpeqsd %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 0, i32 4)
   ret i32 %res
 }
 
 define i32 @test_x86_avx512_ucomi_sd_eq(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_sd_eq:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpeq_uqsd %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 8, i32 4)
   ret i32 %res
 }
 
 define i32 @test_x86_avx512_comi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_comi_sd_lt_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpltsd {sae}, %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 1, i32 8)
   ret i32 %res
 }
 
 define i32 @test_x86_avx512_ucomi_sd_lt_sae(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_sd_lt_sae:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpngesd {sae}, %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 9, i32 8)
   ret i32 %res
 }
 
 define i32 @test_x86_avx512_comi_sd_lt(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_comi_sd_lt:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpltsd %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 1, i32 4)
   ret i32 %res
 }
 
 define i32 @test_x86_avx512_ucomi_sd_lt(<2 x double> %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_sd_lt:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpngesd %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.avx512.vcomi.sd(<2 x double> %a0, <2 x double> %a1, i32 9, i32 4)
   ret i32 %res
 }
@@ -3845,10 +5190,10 @@ declare i32 @llvm.x86.avx512.vcomi.sd(<2
 
 define i32 @test_x86_avx512_ucomi_ss_lt(<4 x float> %a0, <4 x float> %a1) {
 ; CHECK-LABEL: test_x86_avx512_ucomi_ss_lt:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vcmpngess %xmm1, %xmm0, %k0
 ; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call i32 @llvm.x86.avx512.vcomi.ss(<4 x float> %a0, <4 x float> %a1, i32 9, i32 4)
   ret i32 %res
 }
@@ -3858,15 +5203,26 @@ declare i32 @llvm.x86.avx512.vcomi.ss(<4
 declare <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double>, <8 x i64>)
 
 define <8 x double>@test_int_x86_avx512_mask_permvar_df_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_permvar_df_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpermpd %zmm0, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermpd %zmm0, %zmm1, %zmm2 {%k1}
-; CHECK-NEXT:    vpermpd %zmm0, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    vaddpd %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_permvar_df_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vpermpd %zmm0, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermpd %zmm0, %zmm1, %zmm2 {%k1}
+; X64-NEXT:    vpermpd %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
+; X64-NEXT:    vaddpd %zmm3, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_permvar_df_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vpermpd %zmm0, %zmm1, %zmm3
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpermpd %zmm0, %zmm1, %zmm2 {%k1}
+; X86-NEXT:    vpermpd %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vaddpd %zmm0, %zmm2, %zmm0
+; X86-NEXT:    vaddpd %zmm3, %zmm0, %zmm0
+; X86-NEXT:    retl
   %1 = call <8 x double> @llvm.x86.avx512.permvar.df.512(<8 x double> %x0, <8 x i64> %x1)
   %2 = bitcast i8 %x3 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x double> %1, <8 x double> %x2
@@ -3882,15 +5238,26 @@ define <8 x double>@test_int_x86_avx512_
 declare <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64>, <8 x i64>)
 
 define <8 x i64>@test_int_x86_avx512_mask_permvar_di_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_permvar_di_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpermq %zmm0, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermq %zmm0, %zmm1, %zmm2 {%k1}
-; CHECK-NEXT:    vpermq %zmm0, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_permvar_di_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vpermq %zmm0, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermq %zmm0, %zmm1, %zmm2 {%k1}
+; X64-NEXT:    vpermq %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
+; X64-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_permvar_di_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vpermq %zmm0, %zmm1, %zmm3
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpermq %zmm0, %zmm1, %zmm2 {%k1}
+; X86-NEXT:    vpermq %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vpaddq %zmm3, %zmm0, %zmm0
+; X86-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %x0, <8 x i64> %x1)
   %2 = bitcast i8 %x3 to <8 x i1>
   %3 = select <8 x i1> %2, <8 x i64> %1, <8 x i64> %x2
@@ -3906,15 +5273,25 @@ define <8 x i64>@test_int_x86_avx512_mas
 declare <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float>, <16 x i32>)
 
 define <16 x float>@test_int_x86_avx512_mask_permvar_sf_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_permvar_sf_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpermps %zmm0, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermps %zmm0, %zmm1, %zmm2 {%k1}
-; CHECK-NEXT:    vpermps %zmm0, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vaddps %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    vaddps %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_permvar_sf_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vpermps %zmm0, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermps %zmm0, %zmm1, %zmm2 {%k1}
+; X64-NEXT:    vpermps %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vaddps %zmm0, %zmm2, %zmm0
+; X64-NEXT:    vaddps %zmm3, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_permvar_sf_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vpermps %zmm0, %zmm1, %zmm3
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermps %zmm0, %zmm1, %zmm2 {%k1}
+; X86-NEXT:    vpermps %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vaddps %zmm0, %zmm2, %zmm0
+; X86-NEXT:    vaddps %zmm3, %zmm0, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x float> @llvm.x86.avx512.permvar.sf.512(<16 x float> %x0, <16 x i32> %x1)
   %2 = bitcast i16 %x3 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x float> %1, <16 x float> %x2
@@ -3930,15 +5307,25 @@ define <16 x float>@test_int_x86_avx512_
 declare <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32>, <16 x i32>)
 
 define <16 x i32>@test_int_x86_avx512_mask_permvar_si_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
-; CHECK-LABEL: test_int_x86_avx512_mask_permvar_si_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vpermd %zmm0, %zmm1, %zmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpermd %zmm0, %zmm1, %zmm2 {%k1}
-; CHECK-NEXT:    vpermd %zmm0, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_permvar_si_512:
+; X64:       # %bb.0:
+; X64-NEXT:    vpermd %zmm0, %zmm1, %zmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpermd %zmm0, %zmm1, %zmm2 {%k1}
+; X64-NEXT:    vpermd %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
+; X64-NEXT:    vpaddd %zmm0, %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_permvar_si_512:
+; X86:       # %bb.0:
+; X86-NEXT:    vpermd %zmm0, %zmm1, %zmm3
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpermd %zmm0, %zmm1, %zmm2 {%k1}
+; X86-NEXT:    vpermd %zmm0, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vpaddd %zmm3, %zmm0, %zmm0
+; X86-NEXT:    vpaddd %zmm0, %zmm2, %zmm0
+; X86-NEXT:    retl
   %1 = call <16 x i32> @llvm.x86.avx512.permvar.si.512(<16 x i32> %x0, <16 x i32> %x1)
   %2 = bitcast i16 %x3 to <16 x i1>
   %3 = select <16 x i1> %2, <16 x i32> %1, <16 x i32> %x2
@@ -3954,17 +5341,30 @@ define <16 x i32>@test_int_x86_avx512_ma
 declare <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double>, <8 x double>, <8 x i64>, i32, i8, i32)
 
 define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %zmm0, %zmm3
-; CHECK-NEXT:    vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
-; CHECK-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
-; CHECK-NEXT:    vfixupimmpd $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
-; CHECK-NEXT:    vaddpd %zmm4, %zmm3, %zmm3
-; CHECK-NEXT:    vfixupimmpd $3, {sae}, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT:    vaddpd %zmm0, %zmm3, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %zmm0, %zmm3
+; X64-NEXT:    vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
+; X64-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
+; X64-NEXT:    vfixupimmpd $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
+; X64-NEXT:    vaddpd %zmm4, %zmm3, %zmm3
+; X64-NEXT:    vfixupimmpd $3, {sae}, %zmm2, %zmm1, %zmm0
+; X64-NEXT:    vaddpd %zmm0, %zmm3, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %zmm0, %zmm3
+; X86-NEXT:    vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
+; X86-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
+; X86-NEXT:    vfixupimmpd $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
+; X86-NEXT:    vaddpd %zmm4, %zmm3, %zmm3
+; X86-NEXT:    vfixupimmpd $3, {sae}, %zmm2, %zmm1, %zmm0
+; X86-NEXT:    vaddpd %zmm0, %zmm3, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 4, i8 %x4, i32 4)
   %res1 = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> zeroinitializer, <8 x double> %x1, <8 x i64> %x2, i32 5, i8 %x4, i32 4)
   %res2 = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 3, i8 -1, i32 8)
@@ -3974,10 +5374,16 @@ define <8 x double>@test_int_x86_avx512_
 }
 
 define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512_load(<8 x double> %x0, <8 x double> %x1, <8 x i64>* %x2ptr) {
-; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512_load:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfixupimmpd $3, (%rdi), %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512_load:
+; X64:       # %bb.0:
+; X64-NEXT:    vfixupimmpd $3, (%rdi), %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_fixupimm_pd_512_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vfixupimmpd $3, (%eax), %zmm1, %zmm0
+; X86-NEXT:    retl
   %x2 = load <8 x i64>, <8 x i64>* %x2ptr
   %res = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 3, i8 -1, i32 4)
   ret <8 x double> %res
@@ -3986,18 +5392,32 @@ define <8 x double>@test_int_x86_avx512_
 declare <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double>, <8 x double>, <8 x i64>, i32, i8, i32)
 
 define <8 x double>@test_int_x86_avx512_maskz_fixupimm_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_pd_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %zmm0, %zmm3
-; CHECK-NEXT:    vfixupimmpd $3, %zmm2, %zmm1, %zmm3 {%k1} {z}
-; CHECK-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
-; CHECK-NEXT:    vmovapd %zmm0, %zmm5
-; CHECK-NEXT:    vfixupimmpd $5, %zmm4, %zmm1, %zmm5 {%k1} {z}
-; CHECK-NEXT:    vaddpd %zmm5, %zmm3, %zmm3
-; CHECK-NEXT:    vfixupimmpd $2, {sae}, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT:    vaddpd %zmm0, %zmm3, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_fixupimm_pd_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %zmm0, %zmm3
+; X64-NEXT:    vfixupimmpd $3, %zmm2, %zmm1, %zmm3 {%k1} {z}
+; X64-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
+; X64-NEXT:    vmovapd %zmm0, %zmm5
+; X64-NEXT:    vfixupimmpd $5, %zmm4, %zmm1, %zmm5 {%k1} {z}
+; X64-NEXT:    vaddpd %zmm5, %zmm3, %zmm3
+; X64-NEXT:    vfixupimmpd $2, {sae}, %zmm2, %zmm1, %zmm0
+; X64-NEXT:    vaddpd %zmm0, %zmm3, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_fixupimm_pd_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %zmm0, %zmm3
+; X86-NEXT:    vfixupimmpd $3, %zmm2, %zmm1, %zmm3 {%k1} {z}
+; X86-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
+; X86-NEXT:    vmovapd %zmm0, %zmm5
+; X86-NEXT:    vfixupimmpd $5, %zmm4, %zmm1, %zmm5 {%k1} {z}
+; X86-NEXT:    vaddpd %zmm5, %zmm3, %zmm3
+; X86-NEXT:    vfixupimmpd $2, {sae}, %zmm2, %zmm1, %zmm0
+; X86-NEXT:    vaddpd %zmm0, %zmm3, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 3, i8 %x4, i32 4)
   %res1 = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> zeroinitializer, i32 5, i8 %x4, i32 4)
   %res2 = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 2, i8 -1, i32 8)
@@ -4009,18 +5429,32 @@ define <8 x double>@test_int_x86_avx512_
 declare <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float>, <4 x float>, <4 x i32>, i32, i8, i32)
 
 define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %xmm0, %xmm3
-; CHECK-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1}
-; CHECK-NEXT:    vxorps %xmm4, %xmm4, %xmm4
-; CHECK-NEXT:    vmovaps %xmm0, %xmm5
-; CHECK-NEXT:    vfixupimmss $5, %xmm4, %xmm1, %xmm5 {%k1}
-; CHECK-NEXT:    vaddps %xmm5, %xmm3, %xmm3
-; CHECK-NEXT:    vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_fixupimm_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %xmm0, %xmm3
+; X64-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1}
+; X64-NEXT:    vxorps %xmm4, %xmm4, %xmm4
+; X64-NEXT:    vmovaps %xmm0, %xmm5
+; X64-NEXT:    vfixupimmss $5, %xmm4, %xmm1, %xmm5 {%k1}
+; X64-NEXT:    vaddps %xmm5, %xmm3, %xmm3
+; X64-NEXT:    vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_fixupimm_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovaps %xmm0, %xmm3
+; X86-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1}
+; X86-NEXT:    vxorps %xmm4, %xmm4, %xmm4
+; X86-NEXT:    vmovaps %xmm0, %xmm5
+; X86-NEXT:    vfixupimmss $5, %xmm4, %xmm1, %xmm5 {%k1}
+; X86-NEXT:    vaddps %xmm5, %xmm3, %xmm3
+; X86-NEXT:    vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4)
   %res1 = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 4)
   %res2 = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -1, i32 8)
@@ -4032,18 +5466,32 @@ define <4 x float>@test_int_x86_avx512_m
 declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float>, <4 x float>, <4 x i32>, i32, i8, i32)
 
 define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %xmm0, %xmm3
-; CHECK-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm3
-; CHECK-NEXT:    vmovaps %xmm0, %xmm4
-; CHECK-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm4 {%k1} {z}
-; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vaddps %xmm0, %xmm4, %xmm0
-; CHECK-NEXT:    vaddps %xmm3, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_fixupimm_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %xmm0, %xmm3
+; X64-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm3
+; X64-NEXT:    vmovaps %xmm0, %xmm4
+; X64-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm4 {%k1} {z}
+; X64-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    vaddps %xmm0, %xmm4, %xmm0
+; X64-NEXT:    vaddps %xmm3, %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_fixupimm_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovaps %xmm0, %xmm3
+; X86-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
+; X86-NEXT:    vmovaps %xmm0, %xmm4
+; X86-NEXT:    vfixupimmss $5, %xmm2, %xmm1, %xmm4
+; X86-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X86-NEXT:    vaddps %xmm4, %xmm0, %xmm0
+; X86-NEXT:    retl
   %res = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4)
   %res1 = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 8)
   %res2 = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 -1, i32 4)
@@ -4055,18 +5503,31 @@ define <4 x float>@test_int_x86_avx512_m
 declare <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float>, <16 x float>, <16 x i32>, i32, i16, i32)
 
 define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %zmm0, %zmm3
-; CHECK-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1}
-; CHECK-NEXT:    vxorps %xmm4, %xmm4, %xmm4
-; CHECK-NEXT:    vmovaps %zmm0, %zmm5
-; CHECK-NEXT:    vfixupimmps $5, %zmm4, %zmm1, %zmm5 {%k1}
-; CHECK-NEXT:    vaddps %zmm5, %zmm3, %zmm3
-; CHECK-NEXT:    vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT:    vaddps %zmm0, %zmm3, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %zmm0, %zmm3
+; X64-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1}
+; X64-NEXT:    vxorps %xmm4, %xmm4, %xmm4
+; X64-NEXT:    vmovaps %zmm0, %zmm5
+; X64-NEXT:    vfixupimmps $5, %zmm4, %zmm1, %zmm5 {%k1}
+; X64-NEXT:    vaddps %zmm5, %zmm3, %zmm3
+; X64-NEXT:    vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0
+; X64-NEXT:    vaddps %zmm0, %zmm3, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmovaps %zmm0, %zmm3
+; X86-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1}
+; X86-NEXT:    vxorps %xmm4, %xmm4, %xmm4
+; X86-NEXT:    vmovaps %zmm0, %zmm5
+; X86-NEXT:    vfixupimmps $5, %zmm4, %zmm1, %zmm5 {%k1}
+; X86-NEXT:    vaddps %zmm5, %zmm3, %zmm3
+; X86-NEXT:    vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0
+; X86-NEXT:    vaddps %zmm0, %zmm3, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 %x4, i32 4)
   %res1 = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> zeroinitializer, i32 5, i16 %x4, i32 4)
   %res2 = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 -1, i32 8)
@@ -4076,10 +5537,16 @@ define <16 x float>@test_int_x86_avx512_
 }
 
 define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512_load(<16 x float> %x0, <16 x float> %x1, <16 x i32>* %x2ptr) {
-; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512_load:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vfixupimmps $5, (%rdi), %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512_load:
+; X64:       # %bb.0:
+; X64-NEXT:    vfixupimmps $5, (%rdi), %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_fixupimm_ps_512_load:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vfixupimmps $5, (%eax), %zmm1, %zmm0
+; X86-NEXT:    retl
   %x2 = load <16 x i32>, <16 x i32>* %x2ptr
   %res = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 -1, i32 4)
   ret <16 x float> %res
@@ -4088,18 +5555,31 @@ define <16 x float>@test_int_x86_avx512_
 declare <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float>, <16 x float>, <16 x i32>, i32, i16, i32)
 
 define <16 x float>@test_int_x86_avx512_maskz_fixupimm_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i16 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ps_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %zmm0, %zmm3
-; CHECK-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3
-; CHECK-NEXT:    vmovaps %zmm0, %zmm4
-; CHECK-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
-; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    vaddps %zmm0, %zmm4, %zmm0
-; CHECK-NEXT:    vaddps %zmm3, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_fixupimm_ps_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %zmm0, %zmm3
+; X64-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3
+; X64-NEXT:    vmovaps %zmm0, %zmm4
+; X64-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
+; X64-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X64-NEXT:    vaddps %zmm0, %zmm4, %zmm0
+; X64-NEXT:    vaddps %zmm3, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_fixupimm_ps_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vmovaps %zmm0, %zmm3
+; X86-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm3 {%k1} {z}
+; X86-NEXT:    vmovaps %zmm0, %zmm4
+; X86-NEXT:    vfixupimmps $5, %zmm2, %zmm1, %zmm4
+; X86-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
+; X86-NEXT:    vaddps %zmm0, %zmm3, %zmm0
+; X86-NEXT:    vaddps %zmm4, %zmm0, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 %x4, i32 4)
   %res1 = call <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> zeroinitializer, i32 5, i16 %x4, i32 8)
   %res2 = call <16 x float> @llvm.x86.avx512.maskz.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 -1, i32 4)
@@ -4111,18 +5591,32 @@ define <16 x float>@test_int_x86_avx512_
 declare <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double>, <2 x double>, <2 x i64>, i32, i8, i32)
 
 define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %xmm0, %xmm3
-; CHECK-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm3
-; CHECK-NEXT:    vmovapd %xmm0, %xmm4
-; CHECK-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm4 {%k1}
-; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT:    vaddpd %xmm0, %xmm4, %xmm0
-; CHECK-NEXT:    vaddpd %xmm3, %xmm0, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_fixupimm_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %xmm0, %xmm3
+; X64-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm3
+; X64-NEXT:    vmovapd %xmm0, %xmm4
+; X64-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm4 {%k1}
+; X64-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vaddpd %xmm0, %xmm4, %xmm0
+; X64-NEXT:    vaddpd %xmm3, %xmm0, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_fixupimm_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm0, %xmm3
+; X86-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1}
+; X86-NEXT:    vmovapd %xmm0, %xmm4
+; X86-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm4
+; X86-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; X86-NEXT:    vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X86-NEXT:    vaddpd %xmm4, %xmm0, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4)
   %res1 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8)
   %res2 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 -1, i32 4)
@@ -4134,18 +5628,32 @@ define <2 x double>@test_int_x86_avx512_
 declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double>, <2 x double>, <2 x i64>, i32, i8, i32)
 
 define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %xmm0, %xmm3
-; CHECK-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
-; CHECK-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
-; CHECK-NEXT:    vmovapd %xmm0, %xmm5
-; CHECK-NEXT:    vfixupimmsd $5, {sae}, %xmm4, %xmm1, %xmm5 {%k1} {z}
-; CHECK-NEXT:    vaddpd %xmm5, %xmm3, %xmm3
-; CHECK-NEXT:    vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_fixupimm_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %xmm0, %xmm3
+; X64-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
+; X64-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
+; X64-NEXT:    vmovapd %xmm0, %xmm5
+; X64-NEXT:    vfixupimmsd $5, {sae}, %xmm4, %xmm1, %xmm5 {%k1} {z}
+; X64-NEXT:    vaddpd %xmm5, %xmm3, %xmm3
+; X64-NEXT:    vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_fixupimm_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm0, %xmm3
+; X86-NEXT:    vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
+; X86-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
+; X86-NEXT:    vmovapd %xmm0, %xmm5
+; X86-NEXT:    vfixupimmsd $5, {sae}, %xmm4, %xmm1, %xmm5 {%k1} {z}
+; X86-NEXT:    vaddpd %xmm5, %xmm3, %xmm3
+; X86-NEXT:    vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4)
   %res1 = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8)
   %res2 = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 8)
@@ -4158,20 +5666,36 @@ declare double @llvm.fma.f64(double, dou
 declare double @llvm.x86.avx512.vfmadd.f64(double, double, double, i32) #0
 
 define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovapd %xmm0, %xmm3
-; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %xmm0, %xmm4
-; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm4 = (xmm1 * xmm4) + xmm2
-; CHECK-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:    vmovapd %xmm0, %xmm4
-; CHECK-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm4
-; CHECK-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT:    vaddpd %xmm0, %xmm4, %xmm0
-; CHECK-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovapd %xmm0, %xmm3
+; X64-NEXT:    vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %xmm0, %xmm4
+; X64-NEXT:    vfmadd213sd {{.*#+}} xmm4 = (xmm1 * xmm4) + xmm2
+; X64-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
+; X64-NEXT:    vmovapd %xmm0, %xmm4
+; X64-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm4
+; X64-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vaddpd %xmm0, %xmm4, %xmm0
+; X64-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    vmovapd %xmm0, %xmm3
+; X86-NEXT:    vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm0, %xmm4
+; X86-NEXT:    vfmadd213sd {{.*#+}} xmm4 = (xmm1 * xmm4) + xmm2
+; X86-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
+; X86-NEXT:    vmovapd %xmm0, %xmm4
+; X86-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm4
+; X86-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vaddpd %xmm0, %xmm4, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %1 = extractelement <2 x double> %x0, i64 0
   %2 = extractelement <2 x double> %x1, i64 0
   %3 = extractelement <2 x double> %x2, i64 0
@@ -4205,20 +5729,36 @@ define <2 x double>@test_int_x86_avx512_
 }
 
 define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovaps %xmm0, %xmm3
-; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %xmm0, %xmm4
-; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm4 = (xmm1 * xmm4) + xmm2
-; CHECK-NEXT:    vaddps %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:    vmovaps %xmm0, %xmm4
-; CHECK-NEXT:    vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm4
-; CHECK-NEXT:    vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT:    vaddps %xmm0, %xmm4, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovaps %xmm0, %xmm3
+; X64-NEXT:    vfmadd213ss {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %xmm0, %xmm4
+; X64-NEXT:    vfmadd213ss {{.*#+}} xmm4 = (xmm1 * xmm4) + xmm2
+; X64-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; X64-NEXT:    vmovaps %xmm0, %xmm4
+; X64-NEXT:    vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm4
+; X64-NEXT:    vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X64-NEXT:    vaddps %xmm0, %xmm4, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    vmovaps %xmm0, %xmm3
+; X86-NEXT:    vfmadd213ss {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovaps %xmm0, %xmm4
+; X86-NEXT:    vfmadd213ss {{.*#+}} xmm4 = (xmm1 * xmm4) + xmm2
+; X86-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; X86-NEXT:    vmovaps %xmm0, %xmm4
+; X86-NEXT:    vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm4
+; X86-NEXT:    vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; X86-NEXT:    vaddps %xmm0, %xmm4, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %1 = extractelement <4 x float> %x0, i64 0
   %2 = extractelement <4 x float> %x1, i64 0
   %3 = extractelement <4 x float> %x2, i64 0
@@ -4252,14 +5792,24 @@ define <4 x float>@test_int_x86_avx512_m
 }
 
 define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %xmm0, %xmm3
-; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
-; CHECK-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %xmm0, %xmm3
+; X64-NEXT:    vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
+; X64-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm0, %xmm3
+; X86-NEXT:    vfmadd213sd {{.*#+}} xmm3 = (xmm1 * xmm3) + xmm2
+; X86-NEXT:    vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
+; X86-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %1 = extractelement <2 x double> %x0, i64 0
   %2 = extractelement <2 x double> %x1, i64 0
   %3 = extractelement <2 x double> %x2, i64 0
@@ -4284,11 +5834,18 @@ declare float @llvm.fma.f32(float, float
 declare float @llvm.x86.avx512.vfmadd.f32(float, float, float, i32) #0
 
 define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; X86-NEXT:    retl
   %1 = extractelement <4 x float> %x0, i64 0
   %2 = extractelement <4 x float> %x1, i64 0
   %3 = extractelement <4 x float> %x2, i64 0
@@ -4310,20 +5867,36 @@ define <4 x float>@test_int_x86_avx512_m
 }
 
 define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovapd %xmm2, %xmm3
-; CHECK-NEXT:    vfmadd231sd {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vfmadd231sd {{.*#+}} xmm4 = (xmm0 * xmm1) + xmm4
-; CHECK-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm4
-; CHECK-NEXT:    vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
-; CHECK-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovapd %xmm2, %xmm3
+; X64-NEXT:    vfmadd231sd {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %xmm2, %xmm4
+; X64-NEXT:    vfmadd231sd {{.*#+}} xmm4 = (xmm0 * xmm1) + xmm4
+; X64-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
+; X64-NEXT:    vmovapd %xmm2, %xmm4
+; X64-NEXT:    vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm4
+; X64-NEXT:    vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
+; X64-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    vmovapd %xmm2, %xmm3
+; X86-NEXT:    vfmadd231sd {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm2, %xmm4
+; X86-NEXT:    vfmadd231sd {{.*#+}} xmm4 = (xmm0 * xmm1) + xmm4
+; X86-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
+; X86-NEXT:    vmovapd %xmm2, %xmm4
+; X86-NEXT:    vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm4
+; X86-NEXT:    vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %1 = extractelement <2 x double> %x0, i64 0
   %2 = extractelement <2 x double> %x1, i64 0
   %3 = extractelement <2 x double> %x2, i64 0
@@ -4357,20 +5930,36 @@ define <2 x double>@test_int_x86_avx512_
 }
 
 define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovaps %xmm2, %xmm3
-; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %xmm2, %xmm4
-; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm4 = (xmm0 * xmm1) + xmm4
-; CHECK-NEXT:    vaddps %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:    vmovaps %xmm2, %xmm4
-; CHECK-NEXT:    vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm4
-; CHECK-NEXT:    vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddps %xmm2, %xmm4, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovaps %xmm2, %xmm3
+; X64-NEXT:    vfmadd231ss {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %xmm2, %xmm4
+; X64-NEXT:    vfmadd231ss {{.*#+}} xmm4 = (xmm0 * xmm1) + xmm4
+; X64-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; X64-NEXT:    vmovaps %xmm2, %xmm4
+; X64-NEXT:    vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm4
+; X64-NEXT:    vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddps %xmm2, %xmm4, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    vmovaps %xmm2, %xmm3
+; X86-NEXT:    vfmadd231ss {{.*#+}} xmm3 = (xmm0 * xmm1) + xmm3
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovaps %xmm2, %xmm4
+; X86-NEXT:    vfmadd231ss {{.*#+}} xmm4 = (xmm0 * xmm1) + xmm4
+; X86-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; X86-NEXT:    vmovaps %xmm2, %xmm4
+; X86-NEXT:    vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm4
+; X86-NEXT:    vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddps %xmm2, %xmm4, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %1 = extractelement <4 x float> %x0, i64 0
   %2 = extractelement <4 x float> %x1, i64 0
   %3 = extractelement <4 x float> %x2, i64 0
@@ -4404,15 +5993,28 @@ define <4 x float>@test_int_x86_avx512_m
 }
 
 define void @fmadd_ss_mask_memfold(float* %a, float* %b, i8 %c) {
-; CHECK-LABEL: fmadd_ss_mask_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
-; CHECK-NEXT:    kmovw %edx, %k1
-; CHECK-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1}
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; X64-LABEL: fmadd_ss_mask_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-NEXT:    vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
+; X64-NEXT:    kmovw %edx, %k1
+; X64-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; X64-NEXT:    vmovss %xmm0, (%rdi)
+; X64-NEXT:    retq
+;
+; X86-LABEL: fmadd_ss_mask_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovss %xmm1, %xmm0, %xmm0 {%k1}
+; X86-NEXT:    vmovss %xmm0, (%edx)
+; X86-NEXT:    retl
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -4438,14 +6040,26 @@ define void @fmadd_ss_mask_memfold(float
 }
 
 define void @fmadd_ss_maskz_memfold(float* %a, float* %b, i8 %c) {
-; CHECK-LABEL: fmadd_ss_maskz_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
-; CHECK-NEXT:    kmovw %edx, %k1
-; CHECK-NEXT:    vmovss %xmm0, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vmovss %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; X64-LABEL: fmadd_ss_maskz_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
+; X64-NEXT:    kmovw %edx, %k1
+; X64-NEXT:    vmovss %xmm0, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vmovss %xmm0, (%rdi)
+; X64-NEXT:    retq
+;
+; X86-LABEL: fmadd_ss_maskz_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    vfmadd231ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovss %xmm0, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vmovss %xmm0, (%edx)
+; X86-NEXT:    retl
   %a.val = load float, float* %a
   %av0 = insertelement <4 x float> undef, float %a.val, i32 0
   %av1 = insertelement <4 x float> %av0, float 0.000000e+00, i32 1
@@ -4471,15 +6085,28 @@ define void @fmadd_ss_maskz_memfold(floa
 }
 
 define void @fmadd_sd_mask_memfold(double* %a, double* %b, i8 %c) {
-; CHECK-LABEL: fmadd_sd_mask_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
-; CHECK-NEXT:    kmovw %edx, %k1
-; CHECK-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1}
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; X64-LABEL: fmadd_sd_mask_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-NEXT:    vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
+; X64-NEXT:    kmovw %edx, %k1
+; X64-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1}
+; X64-NEXT:    vmovsd %xmm0, (%rdi)
+; X64-NEXT:    retq
+;
+; X86-LABEL: fmadd_sd_mask_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT:    vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + xmm0
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 {%k1}
+; X86-NEXT:    vmovsd %xmm0, (%edx)
+; X86-NEXT:    retl
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -4501,14 +6128,26 @@ define void @fmadd_sd_mask_memfold(doubl
 }
 
 define void @fmadd_sd_maskz_memfold(double* %a, double* %b, i8 %c) {
-; CHECK-LABEL: fmadd_sd_maskz_memfold:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
-; CHECK-NEXT:    kmovw %edx, %k1
-; CHECK-NEXT:    vmovsd %xmm0, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT:    vmovsd %xmm0, (%rdi)
-; CHECK-NEXT:    retq
+; X64-LABEL: fmadd_sd_maskz_memfold:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
+; X64-NEXT:    kmovw %edx, %k1
+; X64-NEXT:    vmovsd %xmm0, %xmm0, %xmm0 {%k1} {z}
+; X64-NEXT:    vmovsd %xmm0, (%rdi)
+; X64-NEXT:    retq
+;
+; X86-LABEL: fmadd_sd_maskz_memfold:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT:    vfmadd231sd {{.*#+}} xmm0 = (xmm0 * mem) + xmm0
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovsd %xmm0, %xmm0, %xmm0 {%k1} {z}
+; X86-NEXT:    vmovsd %xmm0, (%edx)
+; X86-NEXT:    retl
   %a.val = load double, double* %a
   %av0 = insertelement <2 x double> undef, double %a.val, i32 0
   %av = insertelement <2 x double> %av0, double 0.000000e+00, i32 1
@@ -4530,20 +6169,36 @@ define void @fmadd_sd_maskz_memfold(doub
 }
 
 define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovapd %xmm2, %xmm3
-; CHECK-NEXT:    vfmsub231sd {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vfmsub231sd {{.*#+}} xmm4 = (xmm0 * xmm1) - xmm4
-; CHECK-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
-; CHECK-NEXT:    vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
-; CHECK-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovapd %xmm2, %xmm3
+; X64-NEXT:    vfmsub231sd {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %xmm2, %xmm4
+; X64-NEXT:    vfmsub231sd {{.*#+}} xmm4 = (xmm0 * xmm1) - xmm4
+; X64-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
+; X64-NEXT:    vmovapd %xmm2, %xmm4
+; X64-NEXT:    vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
+; X64-NEXT:    vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
+; X64-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    vmovapd %xmm2, %xmm3
+; X86-NEXT:    vfmsub231sd {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm2, %xmm4
+; X86-NEXT:    vfmsub231sd {{.*#+}} xmm4 = (xmm0 * xmm1) - xmm4
+; X86-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
+; X86-NEXT:    vmovapd %xmm2, %xmm4
+; X86-NEXT:    vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
+; X86-NEXT:    vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
   %2 = extractelement <2 x double> %x0, i64 0
   %3 = extractelement <2 x double> %x1, i64 0
@@ -4585,20 +6240,36 @@ define <2 x double>@test_int_x86_avx512_
 }
 
 define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovaps %xmm2, %xmm3
-; CHECK-NEXT:    vfmsub231ss {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %xmm2, %xmm4
-; CHECK-NEXT:    vfmsub231ss {{.*#+}} xmm4 = (xmm0 * xmm1) - xmm4
-; CHECK-NEXT:    vaddps %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:    vmovaps %xmm2, %xmm4
-; CHECK-NEXT:    vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
-; CHECK-NEXT:    vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddps %xmm2, %xmm4, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovaps %xmm2, %xmm3
+; X64-NEXT:    vfmsub231ss {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %xmm2, %xmm4
+; X64-NEXT:    vfmsub231ss {{.*#+}} xmm4 = (xmm0 * xmm1) - xmm4
+; X64-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; X64-NEXT:    vmovaps %xmm2, %xmm4
+; X64-NEXT:    vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
+; X64-NEXT:    vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddps %xmm2, %xmm4, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    vmovaps %xmm2, %xmm3
+; X86-NEXT:    vfmsub231ss {{.*#+}} xmm3 = (xmm0 * xmm1) - xmm3
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovaps %xmm2, %xmm4
+; X86-NEXT:    vfmsub231ss {{.*#+}} xmm4 = (xmm0 * xmm1) - xmm4
+; X86-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; X86-NEXT:    vmovaps %xmm2, %xmm4
+; X86-NEXT:    vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
+; X86-NEXT:    vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddps %xmm2, %xmm4, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
   %2 = extractelement <4 x float> %x0, i64 0
   %3 = extractelement <4 x float> %x1, i64 0
@@ -4640,20 +6311,36 @@ define <4 x float>@test_int_x86_avx512_m
 }
 
 define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovapd %xmm2, %xmm3
-; CHECK-NEXT:    vfnmsub231sd {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vfnmsub231sd {{.*#+}} xmm4 = -(xmm0 * xmm1) - xmm4
-; CHECK-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:    vmovapd %xmm2, %xmm4
-; CHECK-NEXT:    vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
-; CHECK-NEXT:    vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
-; CHECK-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovapd %xmm2, %xmm3
+; X64-NEXT:    vfnmsub231sd {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovapd %xmm2, %xmm4
+; X64-NEXT:    vfnmsub231sd {{.*#+}} xmm4 = -(xmm0 * xmm1) - xmm4
+; X64-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
+; X64-NEXT:    vmovapd %xmm2, %xmm4
+; X64-NEXT:    vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
+; X64-NEXT:    vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
+; X64-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    vmovapd %xmm2, %xmm3
+; X86-NEXT:    vfnmsub231sd {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovapd %xmm2, %xmm4
+; X86-NEXT:    vfnmsub231sd {{.*#+}} xmm4 = -(xmm0 * xmm1) - xmm4
+; X86-NEXT:    vaddpd %xmm4, %xmm3, %xmm3
+; X86-NEXT:    vmovapd %xmm2, %xmm4
+; X86-NEXT:    vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4
+; X86-NEXT:    vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddpd %xmm2, %xmm4, %xmm0
+; X86-NEXT:    vaddpd %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x0
   %2 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x2
   %3 = extractelement <2 x double> %1, i64 0
@@ -4699,20 +6386,36 @@ define <2 x double>@test_int_x86_avx512_
 }
 
 define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovaps %xmm2, %xmm3
-; CHECK-NEXT:    vfnmsub231ss {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vmovaps %xmm2, %xmm4
-; CHECK-NEXT:    vfnmsub231ss {{.*#+}} xmm4 = -(xmm0 * xmm1) - xmm4
-; CHECK-NEXT:    vaddps %xmm4, %xmm3, %xmm3
-; CHECK-NEXT:    vmovaps %xmm2, %xmm4
-; CHECK-NEXT:    vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
-; CHECK-NEXT:    vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT:    vaddps %xmm2, %xmm4, %xmm0
-; CHECK-NEXT:    vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovaps %xmm2, %xmm3
+; X64-NEXT:    vfnmsub231ss {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovaps %xmm2, %xmm4
+; X64-NEXT:    vfnmsub231ss {{.*#+}} xmm4 = -(xmm0 * xmm1) - xmm4
+; X64-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; X64-NEXT:    vmovaps %xmm2, %xmm4
+; X64-NEXT:    vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
+; X64-NEXT:    vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT:    vaddps %xmm2, %xmm4, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    vmovaps %xmm2, %xmm3
+; X86-NEXT:    vfnmsub231ss {{.*#+}} xmm3 = -(xmm0 * xmm1) - xmm3
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovaps %xmm2, %xmm4
+; X86-NEXT:    vfnmsub231ss {{.*#+}} xmm4 = -(xmm0 * xmm1) - xmm4
+; X86-NEXT:    vaddps %xmm4, %xmm3, %xmm3
+; X86-NEXT:    vmovaps %xmm2, %xmm4
+; X86-NEXT:    vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4
+; X86-NEXT:    vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT:    vaddps %xmm2, %xmm4, %xmm0
+; X86-NEXT:    vaddps %xmm0, %xmm3, %xmm0
+; X86-NEXT:    retl
   %1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x0
   %2 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %x2
   %3 = extractelement <4 x float> %1, i64 0
@@ -4758,12 +6461,21 @@ define <4 x float>@test_int_x86_avx512_m
 }
 
 define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, float *%ptr_b ,i8 %x3,i32 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vfmadd231ss {{.*#+}} xmm1 = (xmm0 * mem) + xmm1
-; CHECK-NEXT:    vmovaps %xmm1, %xmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vfmadd231ss {{.*#+}} xmm1 = (xmm0 * mem) + xmm1
+; X64-NEXT:    vmovaps %xmm1, %xmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    kmovw %ecx, %k1
+; X86-NEXT:    vfmadd231ss {{.*#+}} xmm1 = (xmm0 * mem) + xmm1
+; X86-NEXT:    vmovaps %xmm1, %xmm0
+; X86-NEXT:    retl
   %q = load float, float* %ptr_b
   %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
   %1 = extractelement <4 x float> %x0, i64 0
@@ -4778,11 +6490,19 @@ define <4 x float>@test_int_x86_avx512_m
 }
 
 define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
-; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %esi, %k1
-; CHECK-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
-; CHECK-NEXT:    retq
+; X64-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %esi, %k1
+; X64-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    kmovw %ecx, %k1
+; X86-NEXT:    vfmadd132ss {{.*#+}} xmm0 = (xmm0 * mem) + xmm1
+; X86-NEXT:    retl
   %q = load float, float* %ptr_b
   %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
   %1 = extractelement <4 x float> %x0, i64 0
@@ -4799,10 +6519,10 @@ define <4 x float>@test_int_x86_avx512_m
 
 define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
 ; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss_rm:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %q = load float, float* %ptr_b
   %vecinit.i = insertelement <4 x float> undef, float %q, i32 0
   %1 = extractelement <4 x float> %x0, i64 0
@@ -4816,30 +6536,43 @@ define <4 x float>@test_int_x86_avx512_m
 
 define <16 x i32> @test_x86_avx512_psll_d_512(<16 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psll_d_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpslld %xmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
   ret <16 x i32> %res
 }
 define <16 x i32> @test_x86_avx512_mask_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psll_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpslld %xmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psll_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpslld %xmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psll_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpslld %xmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
   ret <16 x i32> %res2
 }
 define <16 x i32> @test_x86_avx512_maskz_psll_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psll_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psll_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psll_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psll.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
@@ -4850,30 +6583,45 @@ declare <16 x i32> @llvm.x86.avx512.psll
 
 define <8 x i64> @test_x86_avx512_psll_q_512(<8 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psll_q_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllq %xmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
   ret <8 x i64> %res
 }
 define <8 x i64> @test_x86_avx512_mask_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psll_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsllq %xmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psll_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsllq %xmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psll_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsllq %xmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
   ret <8 x i64> %res2
 }
 define <8 x i64> @test_x86_avx512_maskz_psll_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psll_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psll_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psll_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psll.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
@@ -4884,30 +6632,43 @@ declare <8 x i64> @llvm.x86.avx512.psll.
 
 define <16 x i32> @test_x86_avx512_pslli_d_512(<16 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx512_pslli_d_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpslld $7, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
   ret <16 x i32> %res
 }
 define <16 x i32> @test_x86_avx512_mask_pslli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_pslli_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpslld $7, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_pslli_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpslld $7, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_pslli_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpslld $7, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
   ret <16 x i32> %res2
 }
 define <16 x i32> @test_x86_avx512_maskz_pslli_d_512(<16 x i32> %a0, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_pslli_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpslld $7, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_pslli_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpslld $7, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_pslli_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpslld $7, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.pslli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
@@ -4918,30 +6679,45 @@ declare <16 x i32> @llvm.x86.avx512.psll
 
 define <8 x i64> @test_x86_avx512_pslli_q_512(<8 x i64> %a0) {
 ; CHECK-LABEL: test_x86_avx512_pslli_q_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
   ret <8 x i64> %res
 }
 define <8 x i64> @test_x86_avx512_mask_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_pslli_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_pslli_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsllq $7, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_pslli_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsllq $7, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
   ret <8 x i64> %res2
 }
 define <8 x i64> @test_x86_avx512_maskz_pslli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_pslli_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_pslli_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsllq $7, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_pslli_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsllq $7, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.pslli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
@@ -4952,30 +6728,45 @@ declare <8 x i64> @llvm.x86.avx512.pslli
 
 define <8 x i64> @test_x86_avx512_psra_q_512(<8 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psra_q_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
   ret <8 x i64> %res
 }
 define <8 x i64> @test_x86_avx512_mask_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psra_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psra_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsraq %xmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psra_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsraq %xmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
   ret <8 x i64> %res2
 }
 define <8 x i64> @test_x86_avx512_maskz_psra_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psra_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psra_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psra_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psra.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
@@ -4986,30 +6777,43 @@ declare <8 x i64> @llvm.x86.avx512.psra.
 
 define <16 x i32> @test_x86_avx512_psra_d_512(<16 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psra_d_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
   ret <16 x i32> %res
 }
 define <16 x i32> @test_x86_avx512_mask_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psra_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psra_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrad %xmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psra_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrad %xmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
   ret <16 x i32> %res2
 }
 define <16 x i32> @test_x86_avx512_maskz_psra_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psra_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psra_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psra_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psra.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
@@ -5021,30 +6825,45 @@ declare <16 x i32> @llvm.x86.avx512.psra
 
 define <8 x i64> @test_x86_avx512_psrai_q_512(<8 x i64> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrai_q_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
   ret <8 x i64> %res
 }
 define <8 x i64> @test_x86_avx512_mask_psrai_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrai_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrai_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsraq $7, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrai_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsraq $7, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
   ret <8 x i64> %res2
 }
 define <8 x i64> @test_x86_avx512_maskz_psrai_q_512(<8 x i64> %a0, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrai_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrai_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsraq $7, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrai_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsraq $7, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrai.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
@@ -5055,30 +6874,43 @@ declare <8 x i64> @llvm.x86.avx512.psrai
 
 define <16 x i32> @test_x86_avx512_psrai_d_512(<16 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrai_d_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
   ret <16 x i32> %res
 }
 define <16 x i32> @test_x86_avx512_mask_psrai_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrai_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrai_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrad $7, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrai_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrad $7, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
   ret <16 x i32> %res2
 }
 define <16 x i32> @test_x86_avx512_maskz_psrai_d_512(<16 x i32> %a0, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrai_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrai_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrad $7, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrai_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrad $7, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrai.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
@@ -5090,30 +6922,43 @@ declare <16 x i32> @llvm.x86.avx512.psra
 
 define <16 x i32> @test_x86_avx512_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrl_d_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
   ret <16 x i32> %res
 }
 define <16 x i32> @test_x86_avx512_mask_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %passthru, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrl_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrl_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrld %xmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrl_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrld %xmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
   ret <16 x i32> %res2
 }
 define <16 x i32> @test_x86_avx512_maskz_psrl_d_512(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrl_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrl_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrl_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrl.d.512(<16 x i32> %a0, <4 x i32> %a1) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
@@ -5124,30 +6969,45 @@ declare <16 x i32> @llvm.x86.avx512.psrl
 
 define <8 x i64> @test_x86_avx512_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrl_q_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
   ret <8 x i64> %res
 }
 define <8 x i64> @test_x86_avx512_mask_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %passthru, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrl_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrl_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrl_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
   ret <8 x i64> %res2
 }
 define <8 x i64> @test_x86_avx512_maskz_psrl_q_512(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrl_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrl_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrl_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrl.q.512(<8 x i64> %a0, <2 x i64> %a1) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
@@ -5158,30 +7018,43 @@ declare <8 x i64> @llvm.x86.avx512.psrl.
 
 define <16 x i32> @test_x86_avx512_psrli_d_512(<16 x i32> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrli_d_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
   ret <16 x i32> %res
 }
 define <16 x i32> @test_x86_avx512_mask_psrli_d_512(<16 x i32> %a0, <16 x i32> %passthru, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrli_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrli_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrld $7, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrli_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrld $7, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %passthru
   ret <16 x i32> %res2
 }
 define <16 x i32> @test_x86_avx512_maskz_psrli_d_512(<16 x i32> %a0, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrli_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrli_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrld $7, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrli_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrld $7, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrli.d.512(<16 x i32> %a0, i32 7) ; <<16 x i32>> [#uses=1]
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
@@ -5192,30 +7065,45 @@ declare <16 x i32> @llvm.x86.avx512.psrl
 
 define <8 x i64> @test_x86_avx512_psrli_q_512(<8 x i64> %a0) {
 ; CHECK-LABEL: test_x86_avx512_psrli_q_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
   ret <8 x i64> %res
 }
 define <8 x i64> @test_x86_avx512_mask_psrli_q_512(<8 x i64> %a0, <8 x i64> %passthru, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrli_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrli_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrlq $7, %zmm0, %zmm1 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrli_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsrlq $7, %zmm0, %zmm1 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm1, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %passthru
   ret <8 x i64> %res2
 }
 define <8 x i64> @test_x86_avx512_maskz_psrli_q_512(<8 x i64> %a0, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrli_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrli_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrli_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrli.q.512(<8 x i64> %a0, i32 7) ; <<8 x i64>> [#uses=1]
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
@@ -5225,22 +7113,31 @@ declare <8 x i64> @llvm.x86.avx512.psrli
 
 define <16 x i32> @test_x86_avx512_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psllv_d_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
   ret <16 x i32> %res
 }
 
 define <16 x i32> @test_x86_avx512_psllv_d_512_const() {
-; CHECK-LABEL: test_x86_avx512_psllv_d_512_const:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
-; CHECK-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
-; CHECK-NEXT:    vpsllvd {{.*}}(%rip), %zmm1, %zmm1
-; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_psllv_d_512_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
+; X64-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
+; X64-NEXT:    vpsllvd {{.*}}(%rip), %zmm1, %zmm1
+; X64-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_psllv_d_512_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
+; X86-NEXT:    vpsllvd {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
+; X86-NEXT:    vpsllvd {{\.LCPI.*}}, %zmm1, %zmm1
+; X86-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
+; X86-NEXT:    retl
   %res0 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
   %res1 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1,  i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,  i32 1, i32 1, i32 1, i32 1, i32 -1>)
   %res2 = add <16 x i32> %res0, %res1
@@ -5248,12 +7145,19 @@ define <16 x i32> @test_x86_avx512_psllv
 }
 
 define <16 x i32> @test_x86_avx512_mask_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psllv_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psllv_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psllv_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %a2
@@ -5261,11 +7165,17 @@ define <16 x i32> @test_x86_avx512_mask_
 }
 
 define <16 x i32> @test_x86_avx512_maskz_psllv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psllv_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psllv_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psllv_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> %a0, <16 x i32> %a1)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
@@ -5276,22 +7186,31 @@ declare <16 x i32> @llvm.x86.avx512.psll
 
 define <8 x i64> @test_x86_avx512_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psllv_q_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %a0, <8 x i64> %a1)
   ret <8 x i64> %res
 }
 
 define <8 x i64> @test_x86_avx512_psllv_q_512_const() {
-; CHECK-LABEL: test_x86_avx512_psllv_q_512_const:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,18446744073709551615,3,7,18446744073709551615,0]
-; CHECK-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,18446744073709551615]
-; CHECK-NEXT:    vpsllvq {{.*}}(%rip), %zmm1, %zmm1
-; CHECK-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_psllv_q_512_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,18446744073709551615,3,7,18446744073709551615,0]
+; X64-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,18446744073709551615]
+; X64-NEXT:    vpsllvq {{.*}}(%rip), %zmm1, %zmm1
+; X64-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_psllv_q_512_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0]
+; X86-NEXT:    vpsllvq {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295]
+; X86-NEXT:    vpsllvq {{\.LCPI.*}}, %zmm1, %zmm1
+; X86-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
+; X86-NEXT:    retl
   %res0 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>)
   %res1 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1,  i64 1, i64 1, i64 1, i64 -1>)
   %res2 = add <8 x i64> %res0, %res1
@@ -5299,12 +7218,20 @@ define <8 x i64> @test_x86_avx512_psllv_
 }
 
 define <8 x i64> @test_x86_avx512_mask_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psllv_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psllv_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psllv_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %a0, <8 x i64> %a1)
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %a2
@@ -5312,11 +7239,18 @@ define <8 x i64> @test_x86_avx512_mask_p
 }
 
 define <8 x i64> @test_x86_avx512_maskz_psllv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psllv_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psllv_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psllv_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> %a0, <8 x i64> %a1)
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
@@ -5327,20 +7261,27 @@ declare <8 x i64> @llvm.x86.avx512.psllv
 
 define <16 x i32> @test_x86_avx512_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrav_d_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %a0, <16 x i32> %a1)
   ret <16 x i32> %res
 }
 
 define <16 x i32> @test_x86_avx512_mask_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrav_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrav_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsravd %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrav_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsravd %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %a0, <16 x i32> %a1)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %a2
@@ -5348,11 +7289,17 @@ define <16 x i32> @test_x86_avx512_mask_
 }
 
 define <16 x i32> @test_x86_avx512_maskz_psrav_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrav_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrav_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrav_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrav.d.512(<16 x i32> %a0, <16 x i32> %a1)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
@@ -5363,20 +7310,28 @@ declare <16 x i32> @llvm.x86.avx512.psra
 
 define <8 x i64> @test_x86_avx512_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrav_q_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %a0, <8 x i64> %a1)
   ret <8 x i64> %res
 }
 
 define <8 x i64> @test_x86_avx512_mask_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrav_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrav_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsravq %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrav_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsravq %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %a0, <8 x i64> %a1)
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %a2
@@ -5384,11 +7339,18 @@ define <8 x i64> @test_x86_avx512_mask_p
 }
 
 define <8 x i64> @test_x86_avx512_maskz_psrav_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrav_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrav_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrav_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrav.q.512(<8 x i64> %a0, <8 x i64> %a1)
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
@@ -5399,22 +7361,31 @@ declare <8 x i64> @llvm.x86.avx512.psrav
 
 define <16 x i32> @test_x86_avx512_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrlv_d_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %a0, <16 x i32> %a1)
   ret <16 x i32> %res
 }
 
 define <16 x i32> @test_x86_avx512_psrlv_d_512_const() {
-; CHECK-LABEL: test_x86_avx512_psrlv_d_512_const:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
-; CHECK-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
-; CHECK-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
-; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_psrlv_d_512_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
+; X64-NEXT:    vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
+; X64-NEXT:    vpsrlvd {{.*}}(%rip), %zmm1, %zmm1
+; X64-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_psrlv_d_512_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
+; X86-NEXT:    vpsrlvd {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
+; X86-NEXT:    vpsrlvd {{\.LCPI.*}}, %zmm1, %zmm1
+; X86-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
+; X86-NEXT:    retl
   %res0 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
   %res1 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1,  i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,  i32 1, i32 1, i32 1, i32 1, i32 -1  >)
   %res2 = add <16 x i32> %res0, %res1
@@ -5422,12 +7393,19 @@ define <16 x i32> @test_x86_avx512_psrlv
 }
 
 define <16 x i32> @test_x86_avx512_mask_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrlv_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrlv_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrlv_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %a0, <16 x i32> %a1)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %a2
@@ -5435,11 +7413,17 @@ define <16 x i32> @test_x86_avx512_mask_
 }
 
 define <16 x i32> @test_x86_avx512_maskz_psrlv_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrlv_d_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrlv_d_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrlv_d_512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1
+; X86-NEXT:    vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> %a0, <16 x i32> %a1)
   %mask.cast = bitcast i16 %mask to <16 x i1>
   %res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> zeroinitializer
@@ -5450,22 +7434,31 @@ declare <16 x i32> @llvm.x86.avx512.psrl
 
 define <8 x i64> @test_x86_avx512_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1) {
 ; CHECK-LABEL: test_x86_avx512_psrlv_q_512:
-; CHECK:       ## %bb.0:
+; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; CHECK-NEXT:    ret{{[l|q]}}
   %res = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %a0, <8 x i64> %a1)
   ret <8 x i64> %res
 }
 
 define <8 x i64> @test_x86_avx512_psrlv_q_512_const() {
-; CHECK-LABEL: test_x86_avx512_psrlv_q_512_const:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,18446744073709551615,3,7,18446744073709551615,0]
-; CHECK-NEXT:    vpsrlvq {{.*}}(%rip), %zmm0, %zmm0
-; CHECK-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,18446744073709551615]
-; CHECK-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
-; CHECK-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_psrlv_q_512_const:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,9,0,18446744073709551615,3,7,18446744073709551615,0]
+; X64-NEXT:    vpsrlvq {{.*}}(%rip), %zmm0, %zmm0
+; X64-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,18446744073709551615]
+; X64-NEXT:    vpsrlvq {{.*}}(%rip), %zmm1, %zmm1
+; X64-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_psrlv_q_512_const:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0]
+; X86-NEXT:    vpsrlvq {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295]
+; X86-NEXT:    vpsrlvq {{\.LCPI.*}}, %zmm1, %zmm1
+; X86-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
+; X86-NEXT:    retl
   %res0 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>)
   %res1 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1,  i64 1, i64 1, i64 1, i64 -1>)
   %res2 = add <8 x i64> %res0, %res1
@@ -5473,12 +7466,20 @@ define <8 x i64> @test_x86_avx512_psrlv_
 }
 
 define <8 x i64> @test_x86_avx512_mask_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_mask_psrlv_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_mask_psrlv_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_mask_psrlv_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %a0, <8 x i64> %a1)
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> %a2
@@ -5486,11 +7487,18 @@ define <8 x i64> @test_x86_avx512_mask_p
 }
 
 define <8 x i64> @test_x86_avx512_maskz_psrlv_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
-; CHECK-LABEL: test_x86_avx512_maskz_psrlv_q_512:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    kmovw %edi, %k1
-; CHECK-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT:    retq
+; X64-LABEL: test_x86_avx512_maskz_psrlv_q_512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test_x86_avx512_maskz_psrlv_q_512:
+; X86:       # %bb.0:
+; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; X86-NEXT:    retl
   %res = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> %a0, <8 x i64> %a1)
   %mask.cast = bitcast i8 %mask to <8 x i1>
   %res2 = select <8 x i1> %mask.cast, <8 x i64> %res, <8 x i64> zeroinitializer
@@ -5500,19 +7508,45 @@ define <8 x i64> @test_x86_avx512_maskz_
 declare <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64>, <8 x i64>) nounwind readnone
 
 define <16 x float> @bad_mask_transition(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d, <16 x float> %e, <16 x float> %f) {
-; CHECK-LABEL: bad_mask_transition:
-; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
-; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    vcmplt_oqpd %zmm3, %zmm2, %k0
-; CHECK-NEXT:    kmovw %k0, %ecx
-; CHECK-NEXT:    movzbl %al, %eax
-; CHECK-NEXT:    movzbl %cl, %ecx
-; CHECK-NEXT:    kmovw %eax, %k0
-; CHECK-NEXT:    kmovw %ecx, %k1
-; CHECK-NEXT:    kunpckbw %k0, %k1, %k1
-; CHECK-NEXT:    vblendmps %zmm5, %zmm4, %zmm0 {%k1}
-; CHECK-NEXT:    retq
+; X64-LABEL: bad_mask_transition:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
+; X64-NEXT:    kmovw %k0, %eax
+; X64-NEXT:    vcmplt_oqpd %zmm3, %zmm2, %k0
+; X64-NEXT:    kmovw %k0, %ecx
+; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    movzbl %cl, %ecx
+; X64-NEXT:    kmovw %eax, %k0
+; X64-NEXT:    kmovw %ecx, %k1
+; X64-NEXT:    kunpckbw %k0, %k1, %k1
+; X64-NEXT:    vblendmps %zmm5, %zmm4, %zmm0 {%k1}
+; X64-NEXT:    retq
+;
+; X86-LABEL: bad_mask_transition:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    .cfi_offset %ebp, -8
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    .cfi_def_cfa_register %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vmovaps 72(%ebp), %zmm3
+; X86-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
+; X86-NEXT:    kmovw %k0, %eax
+; X86-NEXT:    vcmplt_oqpd 8(%ebp), %zmm2, %k0
+; X86-NEXT:    kmovw %k0, %ecx
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    movzbl %cl, %ecx
+; X86-NEXT:    kmovw %eax, %k0
+; X86-NEXT:    kmovw %ecx, %k1
+; X86-NEXT:    kunpckbw %k0, %k1, %k1
+; X86-NEXT:    vmovaps 136(%ebp), %zmm3 {%k1}
+; X86-NEXT:    vmovaps %zmm3, %zmm0
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    .cfi_def_cfa %esp, 4
+; X86-NEXT:    retl
 entry:
   %0 = call <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 17, i32 4)
   %1 = bitcast <8 x i1> %0 to i8
@@ -5530,14 +7564,35 @@ entry:
 }
 
 define <16 x float> @bad_mask_transition_2(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d, <16 x float> %e, <16 x float> %f) {
-; CHECK-LABEL: bad_mask_transition_2:
-; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
-; CHECK-NEXT:    kmovw %k0, %eax
-; CHECK-NEXT:    movzbl %al, %eax
-; CHECK-NEXT:    kmovw %eax, %k1
-; CHECK-NEXT:    vblendmps %zmm5, %zmm4, %zmm0 {%k1}
-; CHECK-NEXT:    retq
+; X64-LABEL: bad_mask_transition_2:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
+; X64-NEXT:    kmovw %k0, %eax
+; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    kmovw %eax, %k1
+; X64-NEXT:    vblendmps %zmm5, %zmm4, %zmm0 {%k1}
+; X64-NEXT:    retq
+;
+; X86-LABEL: bad_mask_transition_2:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    .cfi_offset %ebp, -8
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    .cfi_def_cfa_register %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vmovaps 72(%ebp), %zmm2
+; X86-NEXT:    vcmplt_oqpd %zmm1, %zmm0, %k0
+; X86-NEXT:    kmovw %k0, %eax
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vmovaps 136(%ebp), %zmm2 {%k1}
+; X86-NEXT:    vmovaps %zmm2, %zmm0
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    .cfi_def_cfa %esp, 4
+; X86-NEXT:    retl
 entry:
   %0 = call <8 x i1> @llvm.x86.avx512.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 17, i32 4)
  %1 = bitcast <8 x i1> %0 to i8