[llvm] 40f7ac1 - [CodeGen][X86] Remove unused check-prefix in strict FP tests.

via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 1 22:41:26 PST 2020


Author: Wang, Pengfei
Date: 2020-11-02T14:41:06+08:00
New Revision: 40f7ac1a8f61aedce6e521d454e0961604cf1642

URL: https://github.com/llvm/llvm-project/commit/40f7ac1a8f61aedce6e521d454e0961604cf1642
DIFF: https://github.com/llvm/llvm-project/commit/40f7ac1a8f61aedce6e521d454e0961604cf1642.diff

LOG: [CodeGen][X86] Remove unused check-prefix in strict FP tests.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fp-intrinsics-fma.ll
    llvm/test/CodeGen/X86/fp-intrinsics.ll
    llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll
    llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
    llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
    llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
    llvm/test/CodeGen/X86/fp80-strict-scalar-cmp.ll
    llvm/test/CodeGen/X86/fp80-strict-scalar.ll
    llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
    llvm/test/CodeGen/X86/vec-strict-cmp-256.ll
    llvm/test/CodeGen/X86/vec-strict-cmp-512.ll
    llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll
    llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
    llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
    llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
    llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
    llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll b/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll
index 04de8ec49717..bc16cd768dcb 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics-fma.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefixes=COMMON,NOFMA
-; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck %s --check-prefixes=COMMON,FMA,FMA-AVX1
-; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma4 < %s | FileCheck %s --check-prefixes=COMMON,FMA4
-; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefixes=COMMON,FMA,FMA-AVX512
+; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=NOFMA
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck %s --check-prefixes=FMA,FMA-AVX1
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma4 < %s | FileCheck %s --check-prefix=FMA4
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefixes=FMA,FMA-AVX512
 
 define float @f1(float %0, float %1, float %2) #0 {
 ; NOFMA-LABEL: f1:

diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index 7bef1d678029..8cd021ad54e3 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=+cmov < %s | FileCheck %s --check-prefix=COMMON --check-prefix=X87
-; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=sse2 < %s | FileCheck %s --check-prefix=COMMON --check-prefix=X86-SSE
-; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=COMMON --check-prefix=SSE
-; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
-; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512dq < %s | FileCheck %s --check-prefix=COMMON --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512DQ
+; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=+cmov < %s | FileCheck %s --check-prefix=X87
+; RUN: llc -O3 -mtriple=i686-pc-linux -mattr=sse2 < %s | FileCheck %s --check-prefix=X86-SSE
+; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s --check-prefix=SSE
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512f < %s | FileCheck %s --check-prefixes=AVX,AVX512
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx512dq < %s | FileCheck %s --check-prefixes=AVX,AVX512
 
 ; Verify that constants aren't folded to inexact results when the rounding mode
 ; is unknown.

diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll
index 7f9e57d94f73..a7291aeea6af 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-cmp.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK-32,SSE-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK-64,SSE-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK-32,AVX-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK-64,AVX-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK-32,AVX-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK-64,AVX-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=X87
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,+cmov -O3 | FileCheck %s --check-prefixes=X87-CMOV
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefix=SSE-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefix=SSE-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefix=AVX-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefix=AVX-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefix=X87
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,+cmov -O3 | FileCheck %s --check-prefix=X87-CMOV
 
 define i32 @test_f32_oeq_q(i32 %a, i32 %b, float %f1, float %f2) #0 {
 ; SSE-32-LABEL: test_f32_oeq_q:

diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
index c3576addfcdd..e030a9159710 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
@@ -5,7 +5,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX-X64,AVX1-X64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX-X86,AVX512-X86
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX-X64,AVX512-X64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=CHECK,X87
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=X87
 
 declare i1  @llvm.experimental.constrained.fptosi.i1.f32(float, metadata)
 declare i8  @llvm.experimental.constrained.fptosi.i8.f32(float, metadata)
@@ -54,23 +54,23 @@ define i1 @fptosi_f32toi1(float %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptosi_f32toi1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistps {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptosi_f32toi1:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i1 @llvm.experimental.constrained.fptosi.i1.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i1 %result
@@ -101,23 +101,23 @@ define i8 @fptosi_f32toi8(float %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptosi_f32toi8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistps {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptosi_f32toi8:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i8 @llvm.experimental.constrained.fptosi.i8.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i8 %result
@@ -148,23 +148,23 @@ define i16 @fptosi_f32toi16(float %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptosi_f32toi16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistps {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptosi_f32toi16:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i16 @llvm.experimental.constrained.fptosi.i16.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i16 %result
@@ -191,23 +191,23 @@ define i32 @fptosi_f32toi32(float %x) #0 {
 ; AVX-X64-NEXT:    vcvttss2si %xmm0, %eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptosi_f32toi32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw (%esp)
-; CHECK-NEXT:    movzwl (%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw (%esp)
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptosi_f32toi32:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw (%esp)
+; X87-NEXT:    movzwl (%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpl {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw (%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i32 %result
@@ -272,30 +272,30 @@ define i64 @fptosi_f32toi64(float %x) #0 {
 ; AVX-X64-NEXT:    vcvttss2si %xmm0, %rax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptosi_f32toi64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushl %ebp
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    .cfi_offset %ebp, -8
-; CHECK-NEXT:    movl %esp, %ebp
-; CHECK-NEXT:    .cfi_def_cfa_register %ebp
-; CHECK-NEXT:    andl $-8, %esp
-; CHECK-NEXT:    subl $16, %esp
-; CHECK-NEXT:    flds 8(%ebp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpll {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT:    movl %ebp, %esp
-; CHECK-NEXT:    popl %ebp
-; CHECK-NEXT:    .cfi_def_cfa %esp, 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptosi_f32toi64:
+; X87:       # %bb.0:
+; X87-NEXT:    pushl %ebp
+; X87-NEXT:    .cfi_def_cfa_offset 8
+; X87-NEXT:    .cfi_offset %ebp, -8
+; X87-NEXT:    movl %esp, %ebp
+; X87-NEXT:    .cfi_def_cfa_register %ebp
+; X87-NEXT:    andl $-8, %esp
+; X87-NEXT:    subl $16, %esp
+; X87-NEXT:    flds 8(%ebp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X87-NEXT:    movl %ebp, %esp
+; X87-NEXT:    popl %ebp
+; X87-NEXT:    .cfi_def_cfa %esp, 4
+; X87-NEXT:    retl
   %result = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i64 %result
@@ -326,23 +326,23 @@ define i1 @fptoui_f32toi1(float %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f32toi1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistps {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f32toi1:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i1 @llvm.experimental.constrained.fptoui.i1.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i1 %result
@@ -373,23 +373,23 @@ define i8 @fptoui_f32toi8(float %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f32toi8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistps {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f32toi8:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i8 @llvm.experimental.constrained.fptoui.i8.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i8 %result
@@ -420,24 +420,24 @@ define i16 @fptoui_f32toi16(float %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f32toi16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    flds {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw (%esp)
-; CHECK-NEXT:    movzwl (%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw (%esp)
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f32toi16:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw (%esp)
+; X87-NEXT:    movzwl (%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpl {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw (%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    # kill: def $ax killed $ax killed $eax
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i16 @llvm.experimental.constrained.fptoui.i16.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i16 %result
@@ -504,29 +504,29 @@ define i32 @fptoui_f32toi32(float %x) #0 {
 ; AVX512-X64-NEXT:    vcvttss2usi %xmm0, %eax
 ; AVX512-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f32toi32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushl %ebp
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    .cfi_offset %ebp, -8
-; CHECK-NEXT:    movl %esp, %ebp
-; CHECK-NEXT:    .cfi_def_cfa_register %ebp
-; CHECK-NEXT:    andl $-8, %esp
-; CHECK-NEXT:    subl $16, %esp
-; CHECK-NEXT:    flds 8(%ebp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpll {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movl %ebp, %esp
-; CHECK-NEXT:    popl %ebp
-; CHECK-NEXT:    .cfi_def_cfa %esp, 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f32toi32:
+; X87:       # %bb.0:
+; X87-NEXT:    pushl %ebp
+; X87-NEXT:    .cfi_def_cfa_offset 8
+; X87-NEXT:    .cfi_offset %ebp, -8
+; X87-NEXT:    movl %esp, %ebp
+; X87-NEXT:    .cfi_def_cfa_register %ebp
+; X87-NEXT:    andl $-8, %esp
+; X87-NEXT:    subl $16, %esp
+; X87-NEXT:    flds 8(%ebp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    movl %ebp, %esp
+; X87-NEXT:    popl %ebp
+; X87-NEXT:    .cfi_def_cfa %esp, 4
+; X87-NEXT:    retl
   %result = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i32 %result
@@ -673,49 +673,49 @@ define i64 @fptoui_f32toi64(float %x) #0 {
 ; AVX512-X64-NEXT:    vcvttss2usi %xmm0, %rax
 ; AVX512-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f32toi64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushl %ebp
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    .cfi_offset %ebp, -8
-; CHECK-NEXT:    movl %esp, %ebp
-; CHECK-NEXT:    .cfi_def_cfa_register %ebp
-; CHECK-NEXT:    andl $-8, %esp
-; CHECK-NEXT:    subl $16, %esp
-; CHECK-NEXT:    flds 8(%ebp)
-; CHECK-NEXT:    flds {{\.LCPI.*}}
-; CHECK-NEXT:    fcom %st(1)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstsw %ax
-; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    # kill: def $ah killed $ah killed $ax
-; CHECK-NEXT:    sahf
-; CHECK-NEXT:    setbe %al
-; CHECK-NEXT:    fldz
-; CHECK-NEXT:    ja .LBB9_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    fstp %st(0)
-; CHECK-NEXT:    fldz
-; CHECK-NEXT:    fxch %st(1)
-; CHECK-NEXT:  .LBB9_2:
-; CHECK-NEXT:    fstp %st(1)
-; CHECK-NEXT:    fsubrp %st, %st(1)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    orl $3072, %ecx # imm = 0xC00
-; CHECK-NEXT:    movw %cx, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpll {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movb %al, %dl
-; CHECK-NEXT:    shll $31, %edx
-; CHECK-NEXT:    xorl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movl %ebp, %esp
-; CHECK-NEXT:    popl %ebp
-; CHECK-NEXT:    .cfi_def_cfa %esp, 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f32toi64:
+; X87:       # %bb.0:
+; X87-NEXT:    pushl %ebp
+; X87-NEXT:    .cfi_def_cfa_offset 8
+; X87-NEXT:    .cfi_offset %ebp, -8
+; X87-NEXT:    movl %esp, %ebp
+; X87-NEXT:    .cfi_def_cfa_register %ebp
+; X87-NEXT:    andl $-8, %esp
+; X87-NEXT:    subl $16, %esp
+; X87-NEXT:    flds 8(%ebp)
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fcom %st(1)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstsw %ax
+; X87-NEXT:    xorl %edx, %edx
+; X87-NEXT:    # kill: def $ah killed $ah killed $ax
+; X87-NEXT:    sahf
+; X87-NEXT:    setbe %al
+; X87-NEXT:    fldz
+; X87-NEXT:    ja .LBB9_2
+; X87-NEXT:  # %bb.1:
+; X87-NEXT:    fstp %st(0)
+; X87-NEXT:    fldz
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:  .LBB9_2:
+; X87-NEXT:    fstp %st(1)
+; X87-NEXT:    fsubrp %st, %st(1)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X87-NEXT:    orl $3072, %ecx # imm = 0xC00
+; X87-NEXT:    movw %cx, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movb %al, %dl
+; X87-NEXT:    shll $31, %edx
+; X87-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    movl %ebp, %esp
+; X87-NEXT:    popl %ebp
+; X87-NEXT:    .cfi_def_cfa %esp, 4
+; X87-NEXT:    retl
   %result = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %x,
                                                metadata !"fpexcept.strict") #0
   ret i64 %result
@@ -746,23 +746,23 @@ define i8 @fptosi_f64toi8(double %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptosi_f64toi8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistps {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptosi_f64toi8:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i8 @llvm.experimental.constrained.fptosi.i8.f64(double %x,
                                                metadata !"fpexcept.strict") #0
   ret i8 %result
@@ -793,23 +793,23 @@ define i16 @fptosi_f64toi16(double %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptosi_f64toi16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistps {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptosi_f64toi16:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i16 @llvm.experimental.constrained.fptosi.i16.f64(double %x,
                                                metadata !"fpexcept.strict") #0
   ret i16 %result
@@ -836,23 +836,23 @@ define i32 @fptosi_f64toi32(double %x) #0 {
 ; AVX-X64-NEXT:    vcvttsd2si %xmm0, %eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptosi_f64toi32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw (%esp)
-; CHECK-NEXT:    movzwl (%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw (%esp)
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptosi_f64toi32:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw (%esp)
+; X87-NEXT:    movzwl (%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpl {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw (%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %x,
                                                metadata !"fpexcept.strict") #0
   ret i32 %result
@@ -917,30 +917,30 @@ define i64 @fptosi_f64toi64(double %x) #0 {
 ; AVX-X64-NEXT:    vcvttsd2si %xmm0, %rax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptosi_f64toi64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushl %ebp
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    .cfi_offset %ebp, -8
-; CHECK-NEXT:    movl %esp, %ebp
-; CHECK-NEXT:    .cfi_def_cfa_register %ebp
-; CHECK-NEXT:    andl $-8, %esp
-; CHECK-NEXT:    subl $16, %esp
-; CHECK-NEXT:    fldl 8(%ebp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpll {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT:    movl %ebp, %esp
-; CHECK-NEXT:    popl %ebp
-; CHECK-NEXT:    .cfi_def_cfa %esp, 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptosi_f64toi64:
+; X87:       # %bb.0:
+; X87-NEXT:    pushl %ebp
+; X87-NEXT:    .cfi_def_cfa_offset 8
+; X87-NEXT:    .cfi_offset %ebp, -8
+; X87-NEXT:    movl %esp, %ebp
+; X87-NEXT:    .cfi_def_cfa_register %ebp
+; X87-NEXT:    andl $-8, %esp
+; X87-NEXT:    subl $16, %esp
+; X87-NEXT:    fldl 8(%ebp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X87-NEXT:    movl %ebp, %esp
+; X87-NEXT:    popl %ebp
+; X87-NEXT:    .cfi_def_cfa %esp, 4
+; X87-NEXT:    retl
   %result = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x,
                                                metadata !"fpexcept.strict") #0
   ret i64 %result
@@ -971,23 +971,23 @@ define i1 @fptoui_f64toi1(double %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f64toi1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistps {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f64toi1:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i1 @llvm.experimental.constrained.fptoui.i1.f64(double %x,
                                                metadata !"fpexcept.strict") #0
   ret i1 %result
@@ -1018,23 +1018,23 @@ define i8 @fptoui_f64toi8(double %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $al killed $al killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f64toi8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistps {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movb {{[0-9]+}}(%esp), %al
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f64toi8:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistps {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i8 @llvm.experimental.constrained.fptoui.i8.f64(double %x,
                                                metadata !"fpexcept.strict") #0
   ret i8 %result
@@ -1065,24 +1065,24 @@ define i16 @fptoui_f64toi16(double %x) #0 {
 ; AVX-X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; AVX-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f64toi16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    subl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 12
-; CHECK-NEXT:    fldl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw (%esp)
-; CHECK-NEXT:    movzwl (%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpl {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw (%esp)
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
-; CHECK-NEXT:    addl $8, %esp
-; CHECK-NEXT:    .cfi_def_cfa_offset 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f64toi16:
+; X87:       # %bb.0:
+; X87-NEXT:    subl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 12
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw (%esp)
+; X87-NEXT:    movzwl (%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpl {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw (%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    # kill: def $ax killed $ax killed $eax
+; X87-NEXT:    addl $8, %esp
+; X87-NEXT:    .cfi_def_cfa_offset 4
+; X87-NEXT:    retl
   %result = call i16 @llvm.experimental.constrained.fptoui.i16.f64(double %x,
                                                metadata !"fpexcept.strict") #0
   ret i16 %result
@@ -1149,29 +1149,29 @@ define i32 @fptoui_f64toi32(double %x) #0 {
 ; AVX512-X64-NEXT:    vcvttsd2usi %xmm0, %eax
 ; AVX512-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f64toi32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushl %ebp
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    .cfi_offset %ebp, -8
-; CHECK-NEXT:    movl %esp, %ebp
-; CHECK-NEXT:    .cfi_def_cfa_register %ebp
-; CHECK-NEXT:    andl $-8, %esp
-; CHECK-NEXT:    subl $16, %esp
-; CHECK-NEXT:    fldl 8(%ebp)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    orl $3072, %eax # imm = 0xC00
-; CHECK-NEXT:    movw %ax, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpll {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movl %ebp, %esp
-; CHECK-NEXT:    popl %ebp
-; CHECK-NEXT:    .cfi_def_cfa %esp, 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f64toi32:
+; X87:       # %bb.0:
+; X87-NEXT:    pushl %ebp
+; X87-NEXT:    .cfi_def_cfa_offset 8
+; X87-NEXT:    .cfi_offset %ebp, -8
+; X87-NEXT:    movl %esp, %ebp
+; X87-NEXT:    .cfi_def_cfa_register %ebp
+; X87-NEXT:    andl $-8, %esp
+; X87-NEXT:    subl $16, %esp
+; X87-NEXT:    fldl 8(%ebp)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    orl $3072, %eax # imm = 0xC00
+; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    movl %ebp, %esp
+; X87-NEXT:    popl %ebp
+; X87-NEXT:    .cfi_def_cfa %esp, 4
+; X87-NEXT:    retl
   %result = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x,
                                                metadata !"fpexcept.strict") #0
   ret i32 %result
@@ -1318,49 +1318,49 @@ define i64 @fptoui_f64toi64(double %x) #0 {
 ; AVX512-X64-NEXT:    vcvttsd2usi %xmm0, %rax
 ; AVX512-X64-NEXT:    retq
 ;
-; CHECK-LABEL: fptoui_f64toi64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushl %ebp
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:    .cfi_offset %ebp, -8
-; CHECK-NEXT:    movl %esp, %ebp
-; CHECK-NEXT:    .cfi_def_cfa_register %ebp
-; CHECK-NEXT:    andl $-8, %esp
-; CHECK-NEXT:    subl $16, %esp
-; CHECK-NEXT:    fldl 8(%ebp)
-; CHECK-NEXT:    flds {{\.LCPI.*}}
-; CHECK-NEXT:    fcom %st(1)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstsw %ax
-; CHECK-NEXT:    xorl %edx, %edx
-; CHECK-NEXT:    # kill: def $ah killed $ah killed $ax
-; CHECK-NEXT:    sahf
-; CHECK-NEXT:    setbe %al
-; CHECK-NEXT:    fldz
-; CHECK-NEXT:    ja .LBB18_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    fstp %st(0)
-; CHECK-NEXT:    fldz
-; CHECK-NEXT:    fxch %st(1)
-; CHECK-NEXT:  .LBB18_2:
-; CHECK-NEXT:    fstp %st(1)
-; CHECK-NEXT:    fsubrp %st, %st(1)
-; CHECK-NEXT:    wait
-; CHECK-NEXT:    fnstcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    orl $3072, %ecx # imm = 0xC00
-; CHECK-NEXT:    movw %cx, {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fistpll {{[0-9]+}}(%esp)
-; CHECK-NEXT:    fldcw {{[0-9]+}}(%esp)
-; CHECK-NEXT:    movb %al, %dl
-; CHECK-NEXT:    shll $31, %edx
-; CHECK-NEXT:    xorl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movl %ebp, %esp
-; CHECK-NEXT:    popl %ebp
-; CHECK-NEXT:    .cfi_def_cfa %esp, 4
-; CHECK-NEXT:    retl
+; X87-LABEL: fptoui_f64toi64:
+; X87:       # %bb.0:
+; X87-NEXT:    pushl %ebp
+; X87-NEXT:    .cfi_def_cfa_offset 8
+; X87-NEXT:    .cfi_offset %ebp, -8
+; X87-NEXT:    movl %esp, %ebp
+; X87-NEXT:    .cfi_def_cfa_register %ebp
+; X87-NEXT:    andl $-8, %esp
+; X87-NEXT:    subl $16, %esp
+; X87-NEXT:    fldl 8(%ebp)
+; X87-NEXT:    flds {{\.LCPI.*}}
+; X87-NEXT:    fcom %st(1)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstsw %ax
+; X87-NEXT:    xorl %edx, %edx
+; X87-NEXT:    # kill: def $ah killed $ah killed $ax
+; X87-NEXT:    sahf
+; X87-NEXT:    setbe %al
+; X87-NEXT:    fldz
+; X87-NEXT:    ja .LBB18_2
+; X87-NEXT:  # %bb.1:
+; X87-NEXT:    fstp %st(0)
+; X87-NEXT:    fldz
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:  .LBB18_2:
+; X87-NEXT:    fstp %st(1)
+; X87-NEXT:    fsubrp %st, %st(1)
+; X87-NEXT:    wait
+; X87-NEXT:    fnstcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X87-NEXT:    orl $3072, %ecx # imm = 0xC00
+; X87-NEXT:    movw %cx, {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X87-NEXT:    fldcw {{[0-9]+}}(%esp)
+; X87-NEXT:    movb %al, %dl
+; X87-NEXT:    shll $31, %edx
+; X87-NEXT:    xorl {{[0-9]+}}(%esp), %edx
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    movl %ebp, %esp
+; X87-NEXT:    popl %ebp
+; X87-NEXT:    .cfi_def_cfa %esp, 4
+; X87-NEXT:    retl
   %result = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x,
                                                metadata !"fpexcept.strict") #0
   ret i64 %result

diff  --git a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
index 9ca76295fa0a..973e837cd6b3 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-round.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 -O3 | FileCheck %s --check-prefixes=SSE41,SSE41-X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 -O3 | FileCheck %s --check-prefixes=SSE41,SSE41-X64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX-X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX-X64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX-X86,AVX512-X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX-X64,AVX512-X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 -O3 | FileCheck %s --check-prefix=SSE41-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 -O3 | FileCheck %s --check-prefix=SSE41-X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefix=AVX-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefix=AVX-X64
 
 declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
 declare double @llvm.experimental.constrained.ceil.f64(double, metadata)

diff  --git a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
index c199352d1423..6b29ffbb976e 100644
--- a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -O3 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -O3 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=X64
 
 define x86_fp80 @fma(x86_fp80 %x, x86_fp80 %y, x86_fp80 %z) nounwind strictfp {
 ; X86-LABEL: fma:

diff  --git a/llvm/test/CodeGen/X86/fp80-strict-scalar-cmp.ll b/llvm/test/CodeGen/X86/fp80-strict-scalar-cmp.ll
index a3951f826fe0..cb2361fbb8d3 100644
--- a/llvm/test/CodeGen/X86/fp80-strict-scalar-cmp.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-scalar-cmp.ll
@@ -1,38 +1,38 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=CHECK,X87-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=CHECK,X87-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=X64
 
 define i32 @test_oeq_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_oeq_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    jne .LBB0_3
-; X87-32-NEXT:  # %bb.1:
-; X87-32-NEXT:    jp .LBB0_3
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:  .LBB0_3:
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_oeq_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    jne .LBB0_3
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    jp .LBB0_3
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:  .LBB0_3:
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_oeq_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovnel %esi, %eax
-; X87-64-NEXT:    cmovpl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_oeq_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovnel %esi, %eax
+; X64-NEXT:    cmovpl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"oeq",
                                                metadata !"fpexcept.strict") #0
@@ -41,35 +41,35 @@ define i32 @test_oeq_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ogt_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ogt_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    ja .LBB1_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB1_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ogt_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    ja .LBB1_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB1_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ogt_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovbel %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ogt_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovbel %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ogt",
                                                metadata !"fpexcept.strict") #0
@@ -78,35 +78,35 @@ define i32 @test_ogt_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_oge_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_oge_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jae .LBB2_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB2_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_oge_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jae .LBB2_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB2_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_oge_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovbl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_oge_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovbl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"oge",
                                                metadata !"fpexcept.strict") #0
@@ -115,35 +115,35 @@ define i32 @test_oge_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_olt_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_olt_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    ja .LBB3_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB3_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_olt_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    ja .LBB3_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB3_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_olt_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovbel %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_olt_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovbel %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"olt",
                                                metadata !"fpexcept.strict") #0
@@ -152,35 +152,35 @@ define i32 @test_olt_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ole_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ole_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jae .LBB4_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB4_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ole_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jae .LBB4_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB4_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ole_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovbl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ole_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovbl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ole",
                                                metadata !"fpexcept.strict") #0
@@ -189,35 +189,35 @@ define i32 @test_ole_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_one_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_one_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jne .LBB5_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB5_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_one_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jne .LBB5_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB5_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_one_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovel %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_one_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovel %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"one",
                                                metadata !"fpexcept.strict") #0
@@ -226,35 +226,35 @@ define i32 @test_one_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ord_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ord_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jnp .LBB6_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB6_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ord_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jnp .LBB6_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB6_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ord_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovpl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ord_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovpl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ord",
                                                metadata !"fpexcept.strict") #0
@@ -263,35 +263,35 @@ define i32 @test_ord_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ueq_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ueq_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    je .LBB7_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB7_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ueq_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    je .LBB7_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB7_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ueq_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovnel %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ueq_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovnel %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ueq",
                                                metadata !"fpexcept.strict") #0
@@ -300,35 +300,35 @@ define i32 @test_ueq_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ugt_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ugt_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jb .LBB8_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB8_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ugt_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jb .LBB8_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB8_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ugt_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovael %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ugt_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovael %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ugt",
                                                metadata !"fpexcept.strict") #0
@@ -337,35 +337,35 @@ define i32 @test_ugt_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_uge_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_uge_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jbe .LBB9_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB9_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_uge_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jbe .LBB9_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB9_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_uge_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmoval %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_uge_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmoval %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"uge",
                                                metadata !"fpexcept.strict") #0
@@ -374,35 +374,35 @@ define i32 @test_uge_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ult_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ult_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jb .LBB10_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB10_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ult_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jb .LBB10_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB10_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ult_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovael %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ult_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovael %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ult",
                                                metadata !"fpexcept.strict") #0
@@ -411,35 +411,35 @@ define i32 @test_ult_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ule_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ule_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jbe .LBB11_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB11_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ule_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jbe .LBB11_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB11_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ule_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmoval %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ule_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmoval %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ule",
                                                metadata !"fpexcept.strict") #0
@@ -448,36 +448,36 @@ define i32 @test_ule_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_une_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_une_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    jne .LBB12_3
-; X87-32-NEXT:  # %bb.1:
-; X87-32-NEXT:    jp .LBB12_3
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:  .LBB12_3:
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_une_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    jne .LBB12_3
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    jp .LBB12_3
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:  .LBB12_3:
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_une_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %esi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovnel %edi, %eax
-; X87-64-NEXT:    cmovpl %edi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_une_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovnel %edi, %eax
+; X64-NEXT:    cmovpl %edi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"une",
                                                metadata !"fpexcept.strict") #0
@@ -486,35 +486,35 @@ define i32 @test_une_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_uno_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_uno_q:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fucompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jp .LBB13_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB13_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_uno_q:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fucompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jp .LBB13_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB13_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_uno_q:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fucompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovnpl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_uno_q:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fucompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovnpl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmp.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"uno",
                                                metadata !"fpexcept.strict") #0
@@ -523,36 +523,36 @@ define i32 @test_uno_q(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_oeq_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_oeq_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    jne .LBB14_3
-; X87-32-NEXT:  # %bb.1:
-; X87-32-NEXT:    jp .LBB14_3
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:  .LBB14_3:
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_oeq_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    jne .LBB14_3
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    jp .LBB14_3
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:  .LBB14_3:
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_oeq_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovnel %esi, %eax
-; X87-64-NEXT:    cmovpl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_oeq_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovnel %esi, %eax
+; X64-NEXT:    cmovpl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"oeq",
                                                metadata !"fpexcept.strict") #0
@@ -561,35 +561,35 @@ define i32 @test_oeq_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ogt_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ogt_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    ja .LBB15_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB15_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ogt_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    ja .LBB15_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB15_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ogt_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovbel %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ogt_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovbel %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ogt",
                                                metadata !"fpexcept.strict") #0
@@ -598,35 +598,35 @@ define i32 @test_ogt_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_oge_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_oge_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jae .LBB16_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB16_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_oge_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jae .LBB16_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB16_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_oge_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovbl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_oge_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovbl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"oge",
                                                metadata !"fpexcept.strict") #0
@@ -635,35 +635,35 @@ define i32 @test_oge_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_olt_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_olt_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    ja .LBB17_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB17_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_olt_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    ja .LBB17_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB17_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_olt_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovbel %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_olt_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovbel %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"olt",
                                                metadata !"fpexcept.strict") #0
@@ -672,35 +672,35 @@ define i32 @test_olt_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ole_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ole_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jae .LBB18_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB18_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ole_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jae .LBB18_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB18_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ole_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovbl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ole_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovbl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ole",
                                                metadata !"fpexcept.strict") #0
@@ -709,35 +709,35 @@ define i32 @test_ole_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_one_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_one_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jne .LBB19_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB19_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_one_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jne .LBB19_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB19_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_one_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovel %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_one_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovel %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"one",
                                                metadata !"fpexcept.strict") #0
@@ -746,35 +746,35 @@ define i32 @test_one_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ord_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ord_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jnp .LBB20_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB20_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ord_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jnp .LBB20_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB20_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ord_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovpl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ord_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovpl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ord",
                                                metadata !"fpexcept.strict") #0
@@ -783,35 +783,35 @@ define i32 @test_ord_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ueq_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ueq_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    je .LBB21_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB21_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ueq_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    je .LBB21_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB21_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ueq_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovnel %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ueq_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovnel %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ueq",
                                                metadata !"fpexcept.strict") #0
@@ -820,35 +820,35 @@ define i32 @test_ueq_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ugt_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ugt_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jb .LBB22_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB22_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ugt_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jb .LBB22_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB22_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ugt_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovael %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ugt_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovael %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ugt",
                                                metadata !"fpexcept.strict") #0
@@ -857,35 +857,35 @@ define i32 @test_ugt_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_uge_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_uge_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jbe .LBB23_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB23_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_uge_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jbe .LBB23_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB23_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_uge_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmoval %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_uge_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmoval %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"uge",
                                                metadata !"fpexcept.strict") #0
@@ -894,35 +894,35 @@ define i32 @test_uge_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ult_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ult_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jb .LBB24_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB24_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ult_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jb .LBB24_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB24_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ult_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovael %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ult_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovael %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ult",
                                                metadata !"fpexcept.strict") #0
@@ -931,35 +931,35 @@ define i32 @test_ult_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_ule_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_ule_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jbe .LBB25_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB25_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_ule_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jbe .LBB25_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB25_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_ule_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmoval %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_ule_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmoval %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"ule",
                                                metadata !"fpexcept.strict") #0
@@ -968,36 +968,36 @@ define i32 @test_ule_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_une_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_une_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    jne .LBB26_3
-; X87-32-NEXT:  # %bb.1:
-; X87-32-NEXT:    jp .LBB26_3
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:  .LBB26_3:
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_une_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    jne .LBB26_3
+; X86-NEXT:  # %bb.1:
+; X86-NEXT:    jp .LBB26_3
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:  .LBB26_3:
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_une_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %esi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovnel %edi, %eax
-; X87-64-NEXT:    cmovpl %edi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_une_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovnel %edi, %eax
+; X64-NEXT:    cmovpl %edi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"une",
                                                metadata !"fpexcept.strict") #0
@@ -1006,35 +1006,35 @@ define i32 @test_une_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
 }
 
 define i32 @test_uno_s(i32 %a, i32 %b, x86_fp80 %f1, x86_fp80 %f2) #0 {
-; X87-32-LABEL: test_uno_s:
-; X87-32:       # %bb.0:
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fldt {{[0-9]+}}(%esp)
-; X87-32-NEXT:    fcompp
-; X87-32-NEXT:    wait
-; X87-32-NEXT:    fnstsw %ax
-; X87-32-NEXT:    # kill: def $ah killed $ah killed $ax
-; X87-32-NEXT:    sahf
-; X87-32-NEXT:    jp .LBB27_1
-; X87-32-NEXT:  # %bb.2:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
-; X87-32-NEXT:  .LBB27_1:
-; X87-32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X87-32-NEXT:    movl (%eax), %eax
-; X87-32-NEXT:    retl
+; X86-LABEL: test_uno_s:
+; X86:       # %bb.0:
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fldt {{[0-9]+}}(%esp)
+; X86-NEXT:    fcompp
+; X86-NEXT:    wait
+; X86-NEXT:    fnstsw %ax
+; X86-NEXT:    # kill: def $ah killed $ah killed $ax
+; X86-NEXT:    sahf
+; X86-NEXT:    jp .LBB27_1
+; X86-NEXT:  # %bb.2:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB27_1:
+; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    retl
 ;
-; X87-64-LABEL: test_uno_s:
-; X87-64:       # %bb.0:
-; X87-64-NEXT:    movl %edi, %eax
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fldt {{[0-9]+}}(%rsp)
-; X87-64-NEXT:    fcompi %st(1), %st
-; X87-64-NEXT:    fstp %st(0)
-; X87-64-NEXT:    wait
-; X87-64-NEXT:    cmovnpl %esi, %eax
-; X87-64-NEXT:    retq
+; X64-LABEL: test_uno_s:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fldt {{[0-9]+}}(%rsp)
+; X64-NEXT:    fcompi %st(1), %st
+; X64-NEXT:    fstp %st(0)
+; X64-NEXT:    wait
+; X64-NEXT:    cmovnpl %esi, %eax
+; X64-NEXT:    retq
   %cond = call i1 @llvm.experimental.constrained.fcmps.f80(
                                                x86_fp80 %f1, x86_fp80 %f2, metadata !"uno",
                                                metadata !"fpexcept.strict") #0

diff  --git a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
index 221bebea2957..c127a25d7ca9 100644
--- a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -O3 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -O3 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -O3 | FileCheck %s --check-prefixes=X64
 
 declare x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80, x86_fp80, metadata, metadata)
 declare x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80, x86_fp80, metadata, metadata)

diff  --git a/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
index ee0978ff15ce..85b8930f2aca 100644
--- a/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-cmp-128.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE,SSE-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE,SSE-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX512-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX512-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX512F-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX512F-64
 
 define <4 x i32> @test_v4f32_oeq_q(<4 x i32> %a, <4 x i32> %b, <4 x float> %f1, <4 x float> %f2) #0 {
 ; SSE-32-LABEL: test_v4f32_oeq_q:

diff  --git a/llvm/test/CodeGen/X86/vec-strict-cmp-256.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-256.ll
index dd0dd95daa33..aa16c7c9e50d 100644
--- a/llvm/test/CodeGen/X86/vec-strict-cmp-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-cmp-256.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefix=AVX512-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefix=AVX512-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefix=AVX512F-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefix=AVX512F-64
 
 define <8 x i32> @test_v8f32_oeq_q(<8 x i32> %a, <8 x i32> %b, <8 x float> %f1, <8 x float> %f2) #0 {
 ; AVX-32-LABEL: test_v8f32_oeq_q:

diff  --git a/llvm/test/CodeGen/X86/vec-strict-cmp-512.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-512.ll
index 25b6525737fb..80835b76fd4a 100644
--- a/llvm/test/CodeGen/X86/vec-strict-cmp-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-cmp-512.ll
@@ -1,25 +1,25 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=X64
 
 define <16 x i32> @test_v16f32_oeq_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_oeq_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpeqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_oeq_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpeqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_oeq_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpeqps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_oeq_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpeqps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"oeq",
                                                metadata !"fpexcept.strict") #0
@@ -28,23 +28,23 @@ define <16 x i32> @test_v16f32_oeq_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ogt_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ogt_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpgt_oqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ogt_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpgt_oqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ogt_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmplt_oqps %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ogt_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmplt_oqps %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ogt",
                                                metadata !"fpexcept.strict") #0
@@ -53,23 +53,23 @@ define <16 x i32> @test_v16f32_ogt_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_oge_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_oge_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpge_oqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_oge_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpge_oqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_oge_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmple_oqps %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_oge_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmple_oqps %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"oge",
                                                metadata !"fpexcept.strict") #0
@@ -78,23 +78,23 @@ define <16 x i32> @test_v16f32_oge_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_olt_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_olt_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmplt_oqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_olt_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmplt_oqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_olt_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmplt_oqps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_olt_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmplt_oqps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"olt",
                                                metadata !"fpexcept.strict") #0
@@ -103,23 +103,23 @@ define <16 x i32> @test_v16f32_olt_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ole_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ole_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmple_oqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ole_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmple_oqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ole_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmple_oqps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ole_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmple_oqps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ole",
                                                metadata !"fpexcept.strict") #0
@@ -128,23 +128,23 @@ define <16 x i32> @test_v16f32_ole_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_one_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_one_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpneq_oqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_one_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpneq_oqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_one_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpneq_oqps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_one_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpneq_oqps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"one",
                                                metadata !"fpexcept.strict") #0
@@ -153,23 +153,23 @@ define <16 x i32> @test_v16f32_one_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ord_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ord_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpordps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ord_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpordps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ord_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpordps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ord_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpordps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ord",
                                                metadata !"fpexcept.strict") #0
@@ -178,23 +178,23 @@ define <16 x i32> @test_v16f32_ord_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ueq_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ueq_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpeq_uqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ueq_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpeq_uqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ueq_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpeq_uqps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ueq_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpeq_uqps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ueq",
                                                metadata !"fpexcept.strict") #0
@@ -203,23 +203,23 @@ define <16 x i32> @test_v16f32_ueq_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ugt_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ugt_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnle_uqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ugt_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnle_uqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ugt_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnle_uqps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ugt_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnle_uqps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ugt",
                                                metadata !"fpexcept.strict") #0
@@ -228,23 +228,23 @@ define <16 x i32> @test_v16f32_ugt_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_uge_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_uge_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnlt_uqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_uge_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnlt_uqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_uge_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnlt_uqps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_uge_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnlt_uqps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"uge",
                                                metadata !"fpexcept.strict") #0
@@ -253,23 +253,23 @@ define <16 x i32> @test_v16f32_uge_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ult_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ult_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnge_uqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ult_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnge_uqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ult_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnle_uqps %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ult_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnle_uqps %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ult",
                                                metadata !"fpexcept.strict") #0
@@ -278,23 +278,23 @@ define <16 x i32> @test_v16f32_ult_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ule_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ule_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpngt_uqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ule_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpngt_uqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ule_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnlt_uqps %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ule_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnlt_uqps %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ule",
                                                metadata !"fpexcept.strict") #0
@@ -303,23 +303,23 @@ define <16 x i32> @test_v16f32_ule_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_une_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_une_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpneqps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_une_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpneqps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_une_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpneqps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_une_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpneqps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"une",
                                                metadata !"fpexcept.strict") #0
@@ -328,23 +328,23 @@ define <16 x i32> @test_v16f32_une_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_uno_q(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_uno_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpunordps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_uno_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpunordps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_uno_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpunordps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_uno_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpunordps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmp.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"uno",
                                                metadata !"fpexcept.strict") #0
@@ -353,23 +353,23 @@ define <16 x i32> @test_v16f32_uno_q(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <8 x i64> @test_v8f64_oeq_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_oeq_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpeqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_oeq_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpeqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_oeq_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpeqpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_oeq_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpeqpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"oeq",
                                                metadata !"fpexcept.strict") #0
@@ -378,23 +378,23 @@ define <8 x i64> @test_v8f64_oeq_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ogt_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ogt_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpgt_oqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ogt_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpgt_oqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ogt_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmplt_oqpd %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ogt_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmplt_oqpd %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ogt",
                                                metadata !"fpexcept.strict") #0
@@ -403,23 +403,23 @@ define <8 x i64> @test_v8f64_ogt_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_oge_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_oge_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpge_oqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_oge_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpge_oqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_oge_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmple_oqpd %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_oge_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmple_oqpd %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"oge",
                                                metadata !"fpexcept.strict") #0
@@ -428,23 +428,23 @@ define <8 x i64> @test_v8f64_oge_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_olt_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_olt_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmplt_oqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_olt_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmplt_oqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_olt_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmplt_oqpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_olt_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmplt_oqpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"olt",
                                                metadata !"fpexcept.strict") #0
@@ -453,23 +453,23 @@ define <8 x i64> @test_v8f64_olt_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ole_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ole_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmple_oqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ole_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmple_oqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ole_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmple_oqpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ole_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmple_oqpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ole",
                                                metadata !"fpexcept.strict") #0
@@ -478,23 +478,23 @@ define <8 x i64> @test_v8f64_ole_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_one_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_one_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpneq_oqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_one_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpneq_oqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_one_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpneq_oqpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_one_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpneq_oqpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"one",
                                                metadata !"fpexcept.strict") #0
@@ -503,23 +503,23 @@ define <8 x i64> @test_v8f64_one_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ord_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ord_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpordpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ord_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpordpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ord_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpordpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ord_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpordpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ord",
                                                metadata !"fpexcept.strict") #0
@@ -528,23 +528,23 @@ define <8 x i64> @test_v8f64_ord_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ueq_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ueq_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpeq_uqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ueq_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpeq_uqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ueq_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpeq_uqpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ueq_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpeq_uqpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ueq",
                                                metadata !"fpexcept.strict") #0
@@ -553,23 +553,23 @@ define <8 x i64> @test_v8f64_ueq_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ugt_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ugt_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnle_uqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ugt_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnle_uqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ugt_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnle_uqpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ugt_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnle_uqpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ugt",
                                                metadata !"fpexcept.strict") #0
@@ -578,23 +578,23 @@ define <8 x i64> @test_v8f64_ugt_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_uge_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_uge_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnlt_uqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_uge_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnlt_uqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_uge_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnlt_uqpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_uge_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnlt_uqpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"uge",
                                                metadata !"fpexcept.strict") #0
@@ -603,23 +603,23 @@ define <8 x i64> @test_v8f64_uge_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ult_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ult_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnge_uqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ult_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnge_uqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ult_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnle_uqpd %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ult_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnle_uqpd %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ult",
                                                metadata !"fpexcept.strict") #0
@@ -628,23 +628,23 @@ define <8 x i64> @test_v8f64_ult_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ule_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ule_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpngt_uqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ule_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpngt_uqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ule_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnlt_uqpd %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ule_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnlt_uqpd %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ule",
                                                metadata !"fpexcept.strict") #0
@@ -653,23 +653,23 @@ define <8 x i64> @test_v8f64_ule_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_une_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_une_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpneqpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_une_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpneqpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_une_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpneqpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_une_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpneqpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"une",
                                                metadata !"fpexcept.strict") #0
@@ -678,23 +678,23 @@ define <8 x i64> @test_v8f64_une_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_uno_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_uno_q:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpunordpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_uno_q:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpunordpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_uno_q:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpunordpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_uno_q:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpunordpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmp.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"uno",
                                                metadata !"fpexcept.strict") #0
@@ -703,23 +703,23 @@ define <8 x i64> @test_v8f64_uno_q(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <16 x i32> @test_v16f32_oeq_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_oeq_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpeq_osps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_oeq_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpeq_osps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_oeq_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpeq_osps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_oeq_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpeq_osps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"oeq",
                                                metadata !"fpexcept.strict") #0
@@ -728,23 +728,23 @@ define <16 x i32> @test_v16f32_oeq_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ogt_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ogt_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpgtps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ogt_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpgtps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ogt_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpltps %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ogt_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpltps %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ogt",
                                                metadata !"fpexcept.strict") #0
@@ -753,23 +753,23 @@ define <16 x i32> @test_v16f32_ogt_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_oge_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_oge_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpgeps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_oge_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpgeps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_oge_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpleps %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_oge_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpleps %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"oge",
                                                metadata !"fpexcept.strict") #0
@@ -778,23 +778,23 @@ define <16 x i32> @test_v16f32_oge_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_olt_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_olt_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpltps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_olt_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpltps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_olt_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpltps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_olt_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpltps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"olt",
                                                metadata !"fpexcept.strict") #0
@@ -803,23 +803,23 @@ define <16 x i32> @test_v16f32_olt_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ole_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ole_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpleps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ole_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpleps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ole_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpleps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ole_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpleps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ole",
                                                metadata !"fpexcept.strict") #0
@@ -828,23 +828,23 @@ define <16 x i32> @test_v16f32_ole_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_one_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_one_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpneq_osps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_one_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpneq_osps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_one_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpneq_osps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_one_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpneq_osps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"one",
                                                metadata !"fpexcept.strict") #0
@@ -853,23 +853,23 @@ define <16 x i32> @test_v16f32_one_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ord_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ord_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpord_sps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ord_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpord_sps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ord_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpord_sps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ord_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpord_sps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ord",
                                                metadata !"fpexcept.strict") #0
@@ -878,23 +878,23 @@ define <16 x i32> @test_v16f32_ord_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ueq_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ueq_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpeq_usps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ueq_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpeq_usps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ueq_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpeq_usps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ueq_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpeq_usps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ueq",
                                                metadata !"fpexcept.strict") #0
@@ -903,23 +903,23 @@ define <16 x i32> @test_v16f32_ueq_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ugt_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ugt_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnleps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ugt_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnleps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ugt_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnleps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ugt_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnleps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ugt",
                                                metadata !"fpexcept.strict") #0
@@ -928,23 +928,23 @@ define <16 x i32> @test_v16f32_ugt_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_uge_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_uge_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnltps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_uge_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnltps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_uge_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnltps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_uge_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnltps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"uge",
                                                metadata !"fpexcept.strict") #0
@@ -953,23 +953,23 @@ define <16 x i32> @test_v16f32_uge_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ult_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ult_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpngeps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ult_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpngeps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ult_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnleps %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ult_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnleps %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ult",
                                                metadata !"fpexcept.strict") #0
@@ -978,23 +978,23 @@ define <16 x i32> @test_v16f32_ult_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_ule_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_ule_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpngtps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_ule_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpngtps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_ule_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnltps %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_ule_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnltps %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"ule",
                                                metadata !"fpexcept.strict") #0
@@ -1003,23 +1003,23 @@ define <16 x i32> @test_v16f32_ule_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_une_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_une_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpneq_usps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_une_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpneq_usps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_une_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpneq_usps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_une_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpneq_usps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"une",
                                                metadata !"fpexcept.strict") #0
@@ -1028,23 +1028,23 @@ define <16 x i32> @test_v16f32_une_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <16 x i32> @test_v16f32_uno_s(<16 x i32> %a, <16 x i32> %b, <16 x float> %f1, <16 x float> %f2) #0 {
-; AVX512-32-LABEL: test_v16f32_uno_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpunord_sps 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v16f32_uno_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpunord_sps 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v16f32_uno_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpunord_sps %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v16f32_uno_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpunord_sps %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <16 x i1> @llvm.experimental.constrained.fcmps.v16f32(
                                                <16 x float> %f1, <16 x float> %f2, metadata !"uno",
                                                metadata !"fpexcept.strict") #0
@@ -1053,23 +1053,23 @@ define <16 x i32> @test_v16f32_uno_s(<16 x i32> %a, <16 x i32> %b, <16 x float>
 }
 
 define <8 x i64> @test_v8f64_oeq_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_oeq_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpeq_ospd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_oeq_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpeq_ospd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_oeq_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpeq_ospd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_oeq_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpeq_ospd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"oeq",
                                                metadata !"fpexcept.strict") #0
@@ -1078,23 +1078,23 @@ define <8 x i64> @test_v8f64_oeq_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ogt_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ogt_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpgtpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ogt_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpgtpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ogt_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpltpd %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ogt_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpltpd %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ogt",
                                                metadata !"fpexcept.strict") #0
@@ -1103,23 +1103,23 @@ define <8 x i64> @test_v8f64_ogt_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_oge_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_oge_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpgepd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_oge_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpgepd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_oge_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmplepd %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_oge_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmplepd %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"oge",
                                                metadata !"fpexcept.strict") #0
@@ -1128,23 +1128,23 @@ define <8 x i64> @test_v8f64_oge_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_olt_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_olt_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpltpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_olt_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpltpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_olt_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpltpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_olt_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpltpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"olt",
                                                metadata !"fpexcept.strict") #0
@@ -1153,23 +1153,23 @@ define <8 x i64> @test_v8f64_olt_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ole_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ole_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmplepd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ole_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmplepd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ole_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmplepd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ole_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmplepd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ole",
                                                metadata !"fpexcept.strict") #0
@@ -1178,23 +1178,23 @@ define <8 x i64> @test_v8f64_ole_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_one_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_one_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpneq_ospd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_one_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpneq_ospd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_one_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpneq_ospd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_one_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpneq_ospd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"one",
                                                metadata !"fpexcept.strict") #0
@@ -1203,23 +1203,23 @@ define <8 x i64> @test_v8f64_one_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ord_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ord_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpord_spd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ord_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpord_spd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ord_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpord_spd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ord_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpord_spd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ord",
                                                metadata !"fpexcept.strict") #0
@@ -1228,23 +1228,23 @@ define <8 x i64> @test_v8f64_ord_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ueq_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ueq_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpeq_uspd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ueq_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpeq_uspd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ueq_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpeq_uspd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ueq_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpeq_uspd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ueq",
                                                metadata !"fpexcept.strict") #0
@@ -1253,23 +1253,23 @@ define <8 x i64> @test_v8f64_ueq_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ugt_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ugt_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnlepd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ugt_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnlepd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ugt_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnlepd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ugt_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnlepd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ugt",
                                                metadata !"fpexcept.strict") #0
@@ -1278,23 +1278,23 @@ define <8 x i64> @test_v8f64_ugt_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_uge_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_uge_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpnltpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_uge_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpnltpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_uge_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnltpd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_uge_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnltpd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"uge",
                                                metadata !"fpexcept.strict") #0
@@ -1303,23 +1303,23 @@ define <8 x i64> @test_v8f64_uge_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ult_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ult_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpngepd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ult_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpngepd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ult_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnlepd %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ult_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnlepd %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ult",
                                                metadata !"fpexcept.strict") #0
@@ -1328,23 +1328,23 @@ define <8 x i64> @test_v8f64_ult_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_ule_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_ule_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpngtpd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_ule_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpngtpd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_ule_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpnltpd %zmm2, %zmm3, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_ule_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpnltpd %zmm2, %zmm3, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"ule",
                                                metadata !"fpexcept.strict") #0
@@ -1353,23 +1353,23 @@ define <8 x i64> @test_v8f64_ule_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_une_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_une_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpneq_uspd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_une_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpneq_uspd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_une_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpneq_uspd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_une_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpneq_uspd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"une",
                                                metadata !"fpexcept.strict") #0
@@ -1378,23 +1378,23 @@ define <8 x i64> @test_v8f64_une_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1,
 }
 
 define <8 x i64> @test_v8f64_uno_s(<8 x i64> %a, <8 x i64> %b, <8 x double> %f1, <8 x double> %f2) #0 {
-; AVX512-32-LABEL: test_v8f64_uno_s:
-; AVX512-32:       # %bb.0:
-; AVX512-32-NEXT:    pushl %ebp
-; AVX512-32-NEXT:    movl %esp, %ebp
-; AVX512-32-NEXT:    andl $-64, %esp
-; AVX512-32-NEXT:    subl $64, %esp
-; AVX512-32-NEXT:    vcmpunord_spd 8(%ebp), %zmm2, %k1
-; AVX512-32-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-32-NEXT:    movl %ebp, %esp
-; AVX512-32-NEXT:    popl %ebp
-; AVX512-32-NEXT:    retl
+; X86-LABEL: test_v8f64_uno_s:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl %ebp
+; X86-NEXT:    movl %esp, %ebp
+; X86-NEXT:    andl $-64, %esp
+; X86-NEXT:    subl $64, %esp
+; X86-NEXT:    vcmpunord_spd 8(%ebp), %zmm2, %k1
+; X86-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X86-NEXT:    movl %ebp, %esp
+; X86-NEXT:    popl %ebp
+; X86-NEXT:    retl
 ;
-; AVX512-64-LABEL: test_v8f64_uno_s:
-; AVX512-64:       # %bb.0:
-; AVX512-64-NEXT:    vcmpunord_spd %zmm3, %zmm2, %k1
-; AVX512-64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-64-NEXT:    retq
+; X64-LABEL: test_v8f64_uno_s:
+; X64:       # %bb.0:
+; X64-NEXT:    vcmpunord_spd %zmm3, %zmm2, %k1
+; X64-NEXT:    vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; X64-NEXT:    retq
   %cond = call <8 x i1> @llvm.experimental.constrained.fcmps.v8f64(
                                                <8 x double> %f1, <8 x double> %f2, metadata !"uno",
                                                metadata !"fpexcept.strict") #0

diff --git a/llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll b/llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll
index da8dbe32e840..83606a4bdff9 100644
--- a/llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-cmp-sub128.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE,SSE-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE,SSE-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefix=SSE-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefix=SSE-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefix=AVX512-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefix=AVX512-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512f -O3 | FileCheck %s --check-prefix=AVX512F-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512f -O3 | FileCheck %s --check-prefix=AVX512F-64
 
 define <2 x i32> @test_v2f32_ogt_s(<2 x i32> %a, <2 x i32> %b, <2 x float> %f1, <2 x float> %f2) #0 {
 ; SSE-32-LABEL: test_v2f32_ogt_s:

diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
index 227c33cf6212..93695371091b 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
@@ -1,16 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE-32
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE-64
-; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-32
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-64
-; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F,AVX512F-32
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F,AVX512F-64
-; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX512VL-32
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX512VL-64
-; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ,AVX512DQ-32
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ,AVX512DQ-64
-; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=avx512dq,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VLDQ,AVX512VLDQ-32
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512dq,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VLDQ,AVX512VLDQ-64
+; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefix=SSE-32
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefix=SSE-64
+; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX,AVX-32
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX,AVX-64
+; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX512F,AVX512F-32
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX512F,AVX512F-64
+; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX512VL,AVX512VL-32
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX512VL,AVX512VL-64
+; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=AVX512DQ,AVX512DQ-32
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=AVX512DQ,AVX512DQ-64
+; RUN: llc < %s -disable-peephole -mtriple=i686-unknown-unknown -mattr=avx512dq,avx512vl -O3 | FileCheck %s --check-prefixes=AVX512VLDQ,AVX512VLDQ-32
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512dq,avx512vl -O3 | FileCheck %s --check-prefixes=AVX512VLDQ,AVX512VLDQ-64
 
 declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
 declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)

diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
index 034461536484..d4d285c36485 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll
@@ -5,10 +5,10 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F,AVX512F-64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX512VL-32
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX512VL-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ,AVX512DQ-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ,AVX512DQ-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512dq,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQVL,AVX512DQVL-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512dq,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQVL,AVX512DQVL-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512dq,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQVL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512dq,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQVL
 
 
 declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(<4 x double>, metadata)

diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
index 2791a5582f5b..6f4ab5faaa3b 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-512.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX512VL-32
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX512VL-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512f,avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ,AVX512DQ-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f,avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ,AVX512DQ-64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512f,avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f,avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ
 
 declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f64(<8 x double>, metadata)
 declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f64(<8 x double>, metadata)

diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
index 8ead5e2f4fa4..52639987f547 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
@@ -5,7 +5,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE41,SSE41-64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX,AVX1,AVX-32,AVX1-32
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX,AVX1,AVX-64,AVX1-64
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX,AVX512F,AVX-32,AVX512F-32
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX,AVX512F,AVX-32
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=AVX,AVX512F,AVX-64,AVX512F-64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512vl -O3 | FileCheck %s --check-prefixes=AVX,AVX512VL,AVX-32,AVX512VL-32
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl -O3 | FileCheck %s --check-prefixes=AVX,AVX512VL,AVX-64,AVX512VL-64

diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
index 82af829506bd..a274baefc1ef 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX1,AVX-32,AVX1-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX1,AVX-64,AVX1-64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX1,AVX1-64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 -O3 | FileCheck %s --check-prefixes=CHECK,AVX2,AVX-32,AVX2-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 -O3 | FileCheck %s --check-prefixes=CHECK,AVX2,AVX-64,AVX2-64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 -O3 | FileCheck %s --check-prefixes=CHECK,AVX2,AVX2-64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F,AVX-32,AVX512F-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F,AVX-64,AVX512F-64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefixes=CHECK,AVX512F,AVX512F-64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX-32,AVX512VL-32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX-64,AVX512VL-64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512VL,AVX512VL-64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ,AVX512DQ-32
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512dq -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQ,AVX512DQ-64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512dq,avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX512DQVL,AVX512DQVL-32


        


More information about the llvm-commits mailing list