[llvm] 14ab619 - [X86] Cleanup check prefixes in FCANONICALIZE tests for better sharing

Simon Pilgrim via llvm-commits <llvm-commits@lists.llvm.org>
Tue Sep 24 01:20:30 PDT 2024


Author: Simon Pilgrim
Date: 2024-09-24T09:20:09+01:00
New Revision: 14ab6190f6d1813cc5774dec2623862e1bd6876f

URL: https://github.com/llvm/llvm-project/commit/14ab6190f6d1813cc5774dec2623862e1bd6876f
DIFF: https://github.com/llvm/llvm-project/commit/14ab6190f6d1813cc5774dec2623862e1bd6876f.diff

LOG: [X86] Cleanup check prefixes in FCANONICALIZE tests for better sharing
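
Sharing prefixes lets update_llc_test_checks.py emit a single CHECK block wherever two RUN lines produce identical code, instead of duplicating it under each per-run prefix. A minimal sketch of the mechanism (the function @example is illustrative, not part of this patch): both AVX runs below match one shared AVX block because their codegen agrees, while FileCheck would still accept AVX1- or AVX2-specific lines wherever the outputs diverge.

; RUN: llc -mattr=+avx -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX1
; RUN: llc -mattr=+avx2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX2

declare float @llvm.canonicalize.f32(float)

define float @example(float %a) {
; AVX-LABEL: example:
; AVX:       # %bb.0:
; AVX-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %r = call float @llvm.canonicalize.f32(float %a)
  ret float %r
}

This is why the diff below mostly deletes duplicated AVX1/AVX2 (and AVX512F/AVX512BW) blocks and replaces them with a single AVX or AVX512 block.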

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/canonicalize-vars-f16-type.ll
    llvm/test/CodeGen/X86/canonicalize-vars.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/canonicalize-vars-f16-type.ll b/llvm/test/CodeGen/X86/canonicalize-vars-f16-type.ll
index 52048a0a2065bc..fdf0bf3f692d62 100644
--- a/llvm/test/CodeGen/X86/canonicalize-vars-f16-type.ll
+++ b/llvm/test/CodeGen/X86/canonicalize-vars-f16-type.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --default-march x86_64-unknown-linux-gnu --version 5
 ; RUN: llc -mattr=+sse2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=SSE
-; RUN: llc -mattr=+avx -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX1
-; RUN: llc -mattr=+avx2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX2
-; RUN: llc -mattr=+avx512f -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX512F
-; RUN: llc -mattr=+avx512bw -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX512BW
+; RUN: llc -mattr=+avx -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX1
+; RUN: llc -mattr=+avx2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX2
+; RUN: llc -mattr=+avx512f -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX512,AVX512F
+; RUN: llc -mattr=+avx512bw -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX512,AVX512BW
 
 define void @v_test_canonicalize__half(half addrspace(1)* %out) nounwind {
 ; SSE-LABEL: v_test_canonicalize__half:
@@ -24,71 +24,38 @@ define void @v_test_canonicalize__half(half addrspace(1)* %out) nounwind {
 ; SSE-NEXT:    popq %rbx
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: v_test_canonicalize__half:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    pushq %rbx
-; AVX1-NEXT:    subq $16, %rsp
-; AVX1-NEXT:    movq %rdi, %rbx
-; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; AVX1-NEXT:    vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX1-NEXT:    callq __truncsfhf2@PLT
-; AVX1-NEXT:    vpextrw $0, %xmm0, (%rbx)
-; AVX1-NEXT:    addq $16, %rsp
-; AVX1-NEXT:    popq %rbx
-; AVX1-NEXT:    retq
+; AVX-LABEL: v_test_canonicalize__half:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    subq $16, %rsp
+; AVX-NEXT:    movq %rdi, %rbx
+; AVX-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; AVX-NEXT:    vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT:    callq __truncsfhf2@PLT
+; AVX-NEXT:    vpextrw $0, %xmm0, (%rbx)
+; AVX-NEXT:    addq $16, %rsp
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    retq
 ;
-; AVX2-LABEL: v_test_canonicalize__half:
-; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    pushq %rbx
-; AVX2-NEXT:    subq $16, %rsp
-; AVX2-NEXT:    movq %rdi, %rbx
-; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; AVX2-NEXT:    vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX2-NEXT:    callq __truncsfhf2@PLT
-; AVX2-NEXT:    vpextrw $0, %xmm0, (%rbx)
-; AVX2-NEXT:    addq $16, %rsp
-; AVX2-NEXT:    popq %rbx
-; AVX2-NEXT:    retq
-;
-; AVX512F-LABEL: v_test_canonicalize__half:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    movzwl (%rdi), %eax
-; AVX512F-NEXT:    movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ecx
-; AVX512F-NEXT:    vmovd %ecx, %xmm0
-; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512F-NEXT:    vmovd %eax, %xmm1
-; AVX512F-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512F-NEXT:    vmulss %xmm0, %xmm1, %xmm0
-; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512F-NEXT:    vmovd %xmm0, %eax
-; AVX512F-NEXT:    movw %ax, (%rdi)
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: v_test_canonicalize__half:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    movzwl (%rdi), %eax
-; AVX512BW-NEXT:    movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ecx
-; AVX512BW-NEXT:    vmovd %ecx, %xmm0
-; AVX512BW-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovd %eax, %xmm1
-; AVX512BW-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512BW-NEXT:    vmulss %xmm0, %xmm1, %xmm0
-; AVX512BW-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; AVX512BW-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-NEXT:    movw %ax, (%rdi)
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: v_test_canonicalize__half:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    movzwl (%rdi), %eax
+; AVX512-NEXT:    movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT:    vmulss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    movw %ax, (%rdi)
+; AVX512-NEXT:    retq
 entry:
   %val = load half, half addrspace(1)* %out
   %canonicalized = call half @llvm.canonicalize.f16(half %val)
@@ -131,135 +98,70 @@ define half @complex_canonicalize_fmul_half(half %a, half %b) nounwind {
 ; SSE-NEXT:    popq %rax
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: complex_canonicalize_fmul_half:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    pushq %rax
-; AVX1-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
-; AVX1-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
-; AVX1-NEXT:    # xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX1-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
-; AVX1-NEXT:    # xmm1 = mem[0],zero,zero,zero
-; AVX1-NEXT:    vsubss %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    callq __truncsfhf2@PLT
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
-; AVX1-NEXT:    vaddss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX1-NEXT:    callq __truncsfhf2@PLT
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vsubss (%rsp), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX1-NEXT:    callq __truncsfhf2@PLT
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
-; AVX1-NEXT:    vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmulss (%rsp), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX1-NEXT:    callq __truncsfhf2@PLT
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vsubss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX1-NEXT:    callq __truncsfhf2@PLT
-; AVX1-NEXT:    popq %rax
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: complex_canonicalize_fmul_half:
-; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    pushq %rax
-; AVX2-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
-; AVX2-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
-; AVX2-NEXT:    # xmm0 = mem[0],zero,zero,zero
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX2-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
-; AVX2-NEXT:    # xmm1 = mem[0],zero,zero,zero
-; AVX2-NEXT:    vsubss %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    callq __truncsfhf2@PLT
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
-; AVX2-NEXT:    vaddss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX2-NEXT:    callq __truncsfhf2@PLT
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vsubss (%rsp), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX2-NEXT:    callq __truncsfhf2@PLT
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
-; AVX2-NEXT:    vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmulss (%rsp), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX2-NEXT:    callq __truncsfhf2@PLT
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vsubss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX2-NEXT:    callq __truncsfhf2@PLT
-; AVX2-NEXT:    popq %rax
-; AVX2-NEXT:    retq
+; AVX-LABEL: complex_canonicalize_fmul_half:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq %rax
+; AVX-NEXT:    vmovss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
+; AVX-NEXT:    vmovss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; AVX-NEXT:    # xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX-NEXT:    vmovss (%rsp), %xmm1 # 4-byte Reload
+; AVX-NEXT:    # xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    callq __truncsfhf2@PLT
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
+; AVX-NEXT:    vaddss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT:    callq __truncsfhf2@PLT
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vsubss (%rsp), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT:    callq __truncsfhf2@PLT
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmovss %xmm0, (%rsp) # 4-byte Spill
+; AVX-NEXT:    vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmulss (%rsp), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT:    callq __truncsfhf2@PLT
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vsubss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT:    callq __truncsfhf2@PLT
+; AVX-NEXT:    popq %rax
+; AVX-NEXT:    retq
 ;
-; AVX512F-LABEL: complex_canonicalize_fmul_half:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    vpextrw $0, %xmm1, %eax
-; AVX512F-NEXT:    vpextrw $0, %xmm0, %ecx
-; AVX512F-NEXT:    vmovd %ecx, %xmm0
-; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512F-NEXT:    vmovd %eax, %xmm1
-; AVX512F-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512F-NEXT:    vsubss %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512F-NEXT:    vaddss %xmm1, %xmm0, %xmm2
-; AVX512F-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512F-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX512F-NEXT:    vsubss %xmm0, %xmm2, %xmm0
-; AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512F-NEXT:    movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %eax
-; AVX512F-NEXT:    vmovd %eax, %xmm2
-; AVX512F-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX512F-NEXT:    vmulss %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
-; AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512F-NEXT:    vsubss %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512F-NEXT:    vmovd %xmm0, %eax
-; AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX512F-NEXT:    retq
-;
-; AVX512BW-LABEL: complex_canonicalize_fmul_half:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    vpextrw $0, %xmm1, %eax
-; AVX512BW-NEXT:    vpextrw $0, %xmm0, %ecx
-; AVX512BW-NEXT:    vmovd %ecx, %xmm0
-; AVX512BW-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovd %eax, %xmm1
-; AVX512BW-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512BW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512BW-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512BW-NEXT:    vaddss %xmm1, %xmm0, %xmm2
-; AVX512BW-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512BW-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX512BW-NEXT:    vsubss %xmm0, %xmm2, %xmm0
-; AVX512BW-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512BW-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512BW-NEXT:    movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %eax
-; AVX512BW-NEXT:    vmovd %eax, %xmm2
-; AVX512BW-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX512BW-NEXT:    vmulss %xmm2, %xmm0, %xmm0
-; AVX512BW-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX512BW-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
-; AVX512BW-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512BW-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512BW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: complex_canonicalize_fmul_half:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vpextrw $0, %xmm1, %eax
+; AVX512-NEXT:    vpextrw $0, %xmm0, %ecx
+; AVX512-NEXT:    vmovd %ecx, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm2
+; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    vsubss %xmm0, %xmm2, %xmm0
+; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    vmulss %xmm2, %xmm0, %xmm0
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    retq
 entry:
 
   %mul1 = fsub half %a, %b
@@ -303,109 +205,57 @@ define void @v_test_canonicalize_v2half(<2 x half> addrspace(1)* %out) nounwind
 ; SSE-NEXT:    popq %rbx
 ; SSE-NEXT:    retq
 ;
-; AVX1-LABEL: v_test_canonicalize_v2half:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    pushq %rbx
-; AVX1-NEXT:    subq $48, %rsp
-; AVX1-NEXT:    movq %rdi, %rbx
-; AVX1-NEXT:    vpinsrw $0, 2(%rdi), %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; AVX1-NEXT:    vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; AVX1-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX1-NEXT:    callq __truncsfhf2@PLT
-; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    callq __extendhfsf2@PLT
-; AVX1-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX1-NEXT:    callq __truncsfhf2@PLT
-; AVX1-NEXT:    vpextrw $0, %xmm0, 2(%rbx)
-; AVX1-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-NEXT:    vpextrw $0, %xmm0, (%rbx)
-; AVX1-NEXT:    addq $48, %rsp
-; AVX1-NEXT:    popq %rbx
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: v_test_canonicalize_v2half:
-; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    pushq %rbx
-; AVX2-NEXT:    subq $48, %rsp
-; AVX2-NEXT:    movq %rdi, %rbx
-; AVX2-NEXT:    vpinsrw $0, 2(%rdi), %xmm0, %xmm0
-; AVX2-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; AVX2-NEXT:    vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
-; AVX2-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX2-NEXT:    callq __truncsfhf2@PLT
-; AVX2-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-NEXT:    callq __extendhfsf2@PLT
-; AVX2-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
-; AVX2-NEXT:    callq __truncsfhf2@PLT
-; AVX2-NEXT:    vpextrw $0, %xmm0, 2(%rbx)
-; AVX2-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-NEXT:    vpextrw $0, %xmm0, (%rbx)
-; AVX2-NEXT:    addq $48, %rsp
-; AVX2-NEXT:    popq %rbx
-; AVX2-NEXT:    retq
-;
-; AVX512F-LABEL: v_test_canonicalize_v2half:
-; AVX512F:       # %bb.0: # %entry
-; AVX512F-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512F-NEXT:    movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %eax
-; AVX512F-NEXT:    vmovd %eax, %xmm1
-; AVX512F-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; AVX512F-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX512F-NEXT:    vmulss %xmm1, %xmm2, %xmm2
-; AVX512F-NEXT:    vxorps %xmm3, %xmm3, %xmm3
-; AVX512F-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3]
-; AVX512F-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512F-NEXT:    vmovd %xmm2, %eax
-; AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
-; AVX512F-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512F-NEXT:    vmulss %xmm1, %xmm0, %xmm0
-; AVX512F-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3]
-; AVX512F-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512F-NEXT:    vmovd %xmm0, %eax
-; AVX512F-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512F-NEXT:    vmovd %xmm0, (%rdi)
-; AVX512F-NEXT:    retq
+; AVX-LABEL: v_test_canonicalize_v2half:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    subq $48, %rsp
+; AVX-NEXT:    movq %rdi, %rbx
+; AVX-NEXT:    vpinsrw $0, 2(%rdi), %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; AVX-NEXT:    vpinsrw $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmovd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; AVX-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT:    callq __truncsfhf2@PLT
+; AVX-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT:    callq __extendhfsf2@PLT
+; AVX-NEXT:    vmulss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload
+; AVX-NEXT:    callq __truncsfhf2@PLT
+; AVX-NEXT:    vpextrw $0, %xmm0, 2(%rbx)
+; AVX-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX-NEXT:    vpextrw $0, %xmm0, (%rbx)
+; AVX-NEXT:    addq $48, %rsp
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    retq
 ;
-; AVX512BW-LABEL: v_test_canonicalize_v2half:
-; AVX512BW:       # %bb.0: # %entry
-; AVX512BW-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512BW-NEXT:    movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %eax
-; AVX512BW-NEXT:    vmovd %eax, %xmm1
-; AVX512BW-NEXT:    vcvtph2ps %xmm1, %xmm1
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
-; AVX512BW-NEXT:    vcvtph2ps %xmm2, %xmm2
-; AVX512BW-NEXT:    vmulss %xmm1, %xmm2, %xmm2
-; AVX512BW-NEXT:    vxorps %xmm3, %xmm3, %xmm3
-; AVX512BW-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3]
-; AVX512BW-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
-; AVX512BW-NEXT:    vmovd %xmm2, %eax
-; AVX512BW-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
-; AVX512BW-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512BW-NEXT:    vcvtph2ps %xmm0, %xmm0
-; AVX512BW-NEXT:    vmulss %xmm1, %xmm0, %xmm0
-; AVX512BW-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3]
-; AVX512BW-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
-; AVX512BW-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512BW-NEXT:    vmovd %xmm0, (%rdi)
-; AVX512BW-NEXT:    retq
+; AVX512-LABEL: v_test_canonicalize_v2half:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    movzwl {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm1
+; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX512-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[2,3],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    vmulss %xmm1, %xmm2, %xmm2
+; AVX512-NEXT:    vxorps %xmm3, %xmm3, %xmm3
+; AVX512-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX512-NEXT:    vmovd %xmm2, %eax
+; AVX512-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm2
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vmulss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm3[1,2,3]
+; AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT:    vmovd %xmm0, %eax
+; AVX512-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX512-NEXT:    vmovd %xmm0, (%rdi)
+; AVX512-NEXT:    retq
 entry:
   %val = load <2 x half>, <2 x half> addrspace(1)* %out
   %canonicalized = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %val)
@@ -413,3 +263,8 @@ entry:
   ret void
 }
 
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX1: {{.*}}
+; AVX2: {{.*}}
+; AVX512BW: {{.*}}
+; AVX512F: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/canonicalize-vars.ll b/llvm/test/CodeGen/X86/canonicalize-vars.ll
index 13ea53389411bc..951ea1b72f4390 100644
--- a/llvm/test/CodeGen/X86/canonicalize-vars.ll
+++ b/llvm/test/CodeGen/X86/canonicalize-vars.ll
@@ -1,97 +1,95 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --default-march x86_64-unknown-linux-gnu --version 5
-; RUN: llc -mtriple=i686-- --mattr=-sse2 < %s | FileCheck %s -check-prefixes=SSE1
-; RUN: llc -mattr=+sse2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=SSE2
-; RUN: llc -mattr=+avx -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX1
-; RUN: llc -mattr=+avx2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX1,AVX2
-; RUN: llc -mattr=+avx512f -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX1,AVX512F
+; RUN: llc -mtriple=i686-- < %s | FileCheck %s -check-prefixes=X87
+; RUN: llc -mattr=+sse2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=SSE,SSE2
+; RUN: llc -mattr=+avx -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX1
+; RUN: llc -mattr=+avx2 -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX2
+; RUN: llc -mattr=+avx512f -mtriple=x86_64 < %s | FileCheck %s -check-prefixes=AVX,AVX512F
 
 define float @canon_fp32_varargsf32(float %a) {
-; SSE1-LABEL: canon_fp32_varargsf32:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmuls {{[0-9]+}}(%esp)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: canon_fp32_varargsf32:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    retq
-;
-; AVX1-LABEL: canon_fp32_varargsf32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    retq
-
+; X87-LABEL: canon_fp32_varargsf32:
+; X87:       # %bb.0:
+; X87-NEXT:    fld1
+; X87-NEXT:    fmuls {{[0-9]+}}(%esp)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: canon_fp32_varargsf32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: canon_fp32_varargsf32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    retq
   %canonicalized = call float @llvm.canonicalize.f32(float %a)
   ret float %canonicalized
 }
 
 define x86_fp80 @canon_fp32_varargsf80(x86_fp80 %a) {
-; SSE1-LABEL: canon_fp32_varargsf80:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    fldt {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmulp %st, %st(1)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: canon_fp32_varargsf80:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    fldt {{[0-9]+}}(%rsp)
-; SSE2-NEXT:    fld1
-; SSE2-NEXT:    fmulp %st, %st(1)
-; SSE2-NEXT:    retq
-;
-; AVX1-LABEL: canon_fp32_varargsf80:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    fldt {{[0-9]+}}(%rsp)
-; AVX1-NEXT:    fld1
-; AVX1-NEXT:    fmulp %st, %st(1)
-; AVX1-NEXT:    retq
+; X87-LABEL: canon_fp32_varargsf80:
+; X87:       # %bb.0:
+; X87-NEXT:    fldt {{[0-9]+}}(%esp)
+; X87-NEXT:    fld1
+; X87-NEXT:    fmulp %st, %st(1)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: canon_fp32_varargsf80:
+; SSE:       # %bb.0:
+; SSE-NEXT:    fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT:    fld1
+; SSE-NEXT:    fmulp %st, %st(1)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: canon_fp32_varargsf80:
+; AVX:       # %bb.0:
+; AVX-NEXT:    fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT:    fld1
+; AVX-NEXT:    fmulp %st, %st(1)
+; AVX-NEXT:    retq
   %canonicalized = call x86_fp80 @llvm.canonicalize.f80(x86_fp80 %a)
   ret x86_fp80 %canonicalized
 }
 
 define x86_fp80 @complex_canonicalize_fmul_x86_fp80(x86_fp80 %a, x86_fp80 %b) {
-; SSE1-LABEL: complex_canonicalize_fmul_x86_fp80:
-; SSE1:       # %bb.0: # %entry
-; SSE1-NEXT:    fldt {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fldt {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fsub %st(1), %st
-; SSE1-NEXT:    fld %st(0)
-; SSE1-NEXT:    fadd %st(2), %st
-; SSE1-NEXT:    fsubp %st, %st(1)
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmulp %st, %st(1)
-; SSE1-NEXT:    fsubp %st, %st(1)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: complex_canonicalize_fmul_x86_fp80:
-; SSE2:       # %bb.0: # %entry
-; SSE2-NEXT:    fldt {{[0-9]+}}(%rsp)
-; SSE2-NEXT:    fldt {{[0-9]+}}(%rsp)
-; SSE2-NEXT:    fsub %st(1), %st
-; SSE2-NEXT:    fld %st(0)
-; SSE2-NEXT:    fadd %st(2), %st
-; SSE2-NEXT:    fsubp %st, %st(1)
-; SSE2-NEXT:    fld1
-; SSE2-NEXT:    fmulp %st, %st(1)
-; SSE2-NEXT:    fsubp %st, %st(1)
-; SSE2-NEXT:    retq
-;
-; AVX1-LABEL: complex_canonicalize_fmul_x86_fp80:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    fldt {{[0-9]+}}(%rsp)
-; AVX1-NEXT:    fldt {{[0-9]+}}(%rsp)
-; AVX1-NEXT:    fsub %st(1), %st
-; AVX1-NEXT:    fld %st(0)
-; AVX1-NEXT:    fadd %st(2), %st
-; AVX1-NEXT:    fsubp %st, %st(1)
-; AVX1-NEXT:    fld1
-; AVX1-NEXT:    fmulp %st, %st(1)
-; AVX1-NEXT:    fsubp %st, %st(1)
-; AVX1-NEXT:    retq
+; X87-LABEL: complex_canonicalize_fmul_x86_fp80:
+; X87:       # %bb.0: # %entry
+; X87-NEXT:    fldt {{[0-9]+}}(%esp)
+; X87-NEXT:    fldt {{[0-9]+}}(%esp)
+; X87-NEXT:    fsub %st(1), %st
+; X87-NEXT:    fld %st(0)
+; X87-NEXT:    fadd %st(2), %st
+; X87-NEXT:    fsubp %st, %st(1)
+; X87-NEXT:    fld1
+; X87-NEXT:    fmulp %st, %st(1)
+; X87-NEXT:    fsubp %st, %st(1)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: complex_canonicalize_fmul_x86_fp80:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT:    fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT:    fsub %st(1), %st
+; SSE-NEXT:    fld %st(0)
+; SSE-NEXT:    fadd %st(2), %st
+; SSE-NEXT:    fsubp %st, %st(1)
+; SSE-NEXT:    fld1
+; SSE-NEXT:    fmulp %st, %st(1)
+; SSE-NEXT:    fsubp %st, %st(1)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: complex_canonicalize_fmul_x86_fp80:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT:    fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT:    fsub %st(1), %st
+; AVX-NEXT:    fld %st(0)
+; AVX-NEXT:    fadd %st(2), %st
+; AVX-NEXT:    fsubp %st, %st(1)
+; AVX-NEXT:    fld1
+; AVX-NEXT:    fmulp %st, %st(1)
+; AVX-NEXT:    fsubp %st, %st(1)
+; AVX-NEXT:    retq
 entry:
-
   %mul1 = fsub x86_fp80 %a, %b
   %add = fadd x86_fp80 %mul1, %b
   %mul2 = fsub x86_fp80 %add, %mul1
@@ -101,49 +99,57 @@ entry:
 }
 
 define double @canonicalize_fp64(double %a, double %b) unnamed_addr #0 {
-; SSE1-LABEL: canonicalize_fp64:
-; SSE1:       # %bb.0: # %start
-; SSE1-NEXT:    fldl {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fldl {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fucom %st(1)
-; SSE1-NEXT:    fnstsw %ax
-; SSE1-NEXT:    # kill: def $ah killed $ah killed $ax
-; SSE1-NEXT:    sahf
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:    fucom %st(0)
-; SSE1-NEXT:    fnstsw %ax
-; SSE1-NEXT:    fld %st(1)
-; SSE1-NEXT:    ja .LBB3_2
-; SSE1-NEXT:  # %bb.1: # %start
-; SSE1-NEXT:    fstp %st(0)
-; SSE1-NEXT:    fldz
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:  .LBB3_2: # %start
-; SSE1-NEXT:    fstp %st(1)
-; SSE1-NEXT:    # kill: def $ah killed $ah killed $ax
-; SSE1-NEXT:    sahf
-; SSE1-NEXT:    jp .LBB3_4
-; SSE1-NEXT:  # %bb.3: # %start
-; SSE1-NEXT:    fstp %st(1)
-; SSE1-NEXT:    fldz
-; SSE1-NEXT:  .LBB3_4: # %start
-; SSE1-NEXT:    fstp %st(0)
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmulp %st, %st(1)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: canonicalize_fp64:
-; SSE2:       # %bb.0: # %start
-; SSE2-NEXT:    movapd %xmm0, %xmm2
-; SSE2-NEXT:    cmpunordsd %xmm0, %xmm2
-; SSE2-NEXT:    movapd %xmm2, %xmm3
-; SSE2-NEXT:    andpd %xmm1, %xmm3
-; SSE2-NEXT:    maxsd %xmm0, %xmm1
-; SSE2-NEXT:    andnpd %xmm1, %xmm2
-; SSE2-NEXT:    orpd %xmm3, %xmm2
-; SSE2-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE2-NEXT:    movapd %xmm2, %xmm0
-; SSE2-NEXT:    retq
+; X87-LABEL: canonicalize_fp64:
+; X87:       # %bb.0: # %start
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    fldl {{[0-9]+}}(%esp)
+; X87-NEXT:    fucom %st(1)
+; X87-NEXT:    fnstsw %ax
+; X87-NEXT:    # kill: def $ah killed $ah killed $ax
+; X87-NEXT:    sahf
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:    fucom %st(0)
+; X87-NEXT:    fnstsw %ax
+; X87-NEXT:    fld %st(1)
+; X87-NEXT:    ja .LBB3_2
+; X87-NEXT:  # %bb.1: # %start
+; X87-NEXT:    fstp %st(0)
+; X87-NEXT:    fldz
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:  .LBB3_2: # %start
+; X87-NEXT:    fstp %st(1)
+; X87-NEXT:    # kill: def $ah killed $ah killed $ax
+; X87-NEXT:    sahf
+; X87-NEXT:    jp .LBB3_4
+; X87-NEXT:  # %bb.3: # %start
+; X87-NEXT:    fstp %st(1)
+; X87-NEXT:    fldz
+; X87-NEXT:  .LBB3_4: # %start
+; X87-NEXT:    fstp %st(0)
+; X87-NEXT:    fld1
+; X87-NEXT:    fmulp %st, %st(1)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: canonicalize_fp64:
+; SSE:       # %bb.0: # %start
+; SSE-NEXT:    movapd %xmm0, %xmm2
+; SSE-NEXT:    cmpunordsd %xmm0, %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm3
+; SSE-NEXT:    andpd %xmm1, %xmm3
+; SSE-NEXT:    maxsd %xmm0, %xmm1
+; SSE-NEXT:    andnpd %xmm1, %xmm2
+; SSE-NEXT:    orpd %xmm3, %xmm2
+; SSE-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    movapd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: canonicalize_fp64:
+; AVX1:       # %bb.0: # %start
+; AVX1-NEXT:    vmaxsd %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vcmpunordsd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
+; AVX1-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: canonicalize_fp64:
 ; AVX2:       # %bb.0: # %start
@@ -161,7 +167,6 @@ define double @canonicalize_fp64(double %a, double %b) unnamed_addr #0 {
 ; AVX512F-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0
 ; AVX512F-NEXT:    retq
 start:
-
   %c = fcmp olt double %a, %b
   %d = fcmp uno double %a, 0.000000e+00
   %or.cond.i.i = or i1 %d, %c
@@ -171,49 +176,57 @@ start:
 }
 
 define float @canonicalize_fp32(float %aa, float %bb) unnamed_addr #0 {
-; SSE1-LABEL: canonicalize_fp32:
-; SSE1:       # %bb.0: # %start
-; SSE1-NEXT:    flds {{[0-9]+}}(%esp)
-; SSE1-NEXT:    flds {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fucom %st(1)
-; SSE1-NEXT:    fnstsw %ax
-; SSE1-NEXT:    # kill: def $ah killed $ah killed $ax
-; SSE1-NEXT:    sahf
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:    fucom %st(0)
-; SSE1-NEXT:    fnstsw %ax
-; SSE1-NEXT:    fld %st(1)
-; SSE1-NEXT:    ja .LBB4_2
-; SSE1-NEXT:  # %bb.1: # %start
-; SSE1-NEXT:    fstp %st(0)
-; SSE1-NEXT:    fldz
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:  .LBB4_2: # %start
-; SSE1-NEXT:    fstp %st(1)
-; SSE1-NEXT:    # kill: def $ah killed $ah killed $ax
-; SSE1-NEXT:    sahf
-; SSE1-NEXT:    jp .LBB4_4
-; SSE1-NEXT:  # %bb.3: # %start
-; SSE1-NEXT:    fstp %st(1)
-; SSE1-NEXT:    fldz
-; SSE1-NEXT:  .LBB4_4: # %start
-; SSE1-NEXT:    fstp %st(0)
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmulp %st, %st(1)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: canonicalize_fp32:
-; SSE2:       # %bb.0: # %start
-; SSE2-NEXT:    movaps %xmm0, %xmm2
-; SSE2-NEXT:    cmpunordss %xmm0, %xmm2
-; SSE2-NEXT:    movaps %xmm2, %xmm3
-; SSE2-NEXT:    andps %xmm1, %xmm3
-; SSE2-NEXT:    maxss %xmm0, %xmm1
-; SSE2-NEXT:    andnps %xmm1, %xmm2
-; SSE2-NEXT:    orps %xmm3, %xmm2
-; SSE2-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE2-NEXT:    movaps %xmm2, %xmm0
-; SSE2-NEXT:    retq
+; X87-LABEL: canonicalize_fp32:
+; X87:       # %bb.0: # %start
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    flds {{[0-9]+}}(%esp)
+; X87-NEXT:    fucom %st(1)
+; X87-NEXT:    fnstsw %ax
+; X87-NEXT:    # kill: def $ah killed $ah killed $ax
+; X87-NEXT:    sahf
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:    fucom %st(0)
+; X87-NEXT:    fnstsw %ax
+; X87-NEXT:    fld %st(1)
+; X87-NEXT:    ja .LBB4_2
+; X87-NEXT:  # %bb.1: # %start
+; X87-NEXT:    fstp %st(0)
+; X87-NEXT:    fldz
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:  .LBB4_2: # %start
+; X87-NEXT:    fstp %st(1)
+; X87-NEXT:    # kill: def $ah killed $ah killed $ax
+; X87-NEXT:    sahf
+; X87-NEXT:    jp .LBB4_4
+; X87-NEXT:  # %bb.3: # %start
+; X87-NEXT:    fstp %st(1)
+; X87-NEXT:    fldz
+; X87-NEXT:  .LBB4_4: # %start
+; X87-NEXT:    fstp %st(0)
+; X87-NEXT:    fld1
+; X87-NEXT:    fmulp %st, %st(1)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: canonicalize_fp32:
+; SSE:       # %bb.0: # %start
+; SSE-NEXT:    movaps %xmm0, %xmm2
+; SSE-NEXT:    cmpunordss %xmm0, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    andps %xmm1, %xmm3
+; SSE-NEXT:    maxss %xmm0, %xmm1
+; SSE-NEXT:    andnps %xmm1, %xmm2
+; SSE-NEXT:    orps %xmm3, %xmm2
+; SSE-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: canonicalize_fp32:
+; AVX1:       # %bb.0: # %start
+; AVX1-NEXT:    vmaxss %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vcmpunordss %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX1-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: canonicalize_fp32:
 ; AVX2:       # %bb.0: # %start
@@ -231,7 +244,6 @@ define float @canonicalize_fp32(float %aa, float %bb) unnamed_addr #0 {
 ; AVX512F-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm0
 ; AVX512F-NEXT:    retq
 start:
-
   %cc = fcmp olt float %aa, %bb
   %dd = fcmp uno float %aa, 0.000000e+00
   %or.cond.i.i.x = or i1 %dd, %cc
@@ -241,27 +253,27 @@ start:
 }
 
 define void @v_test_canonicalize_var_f32(float addrspace(1)* %out) #1 {
-; SSE1-LABEL: v_test_canonicalize_var_f32:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmuls (%eax)
-; SSE1-NEXT:    fstps (%eax)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: v_test_canonicalize_var_f32:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    movss %xmm0, (%rdi)
-; SSE2-NEXT:    retq
-;
-; AVX1-LABEL: v_test_canonicalize_var_f32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vmovss %xmm0, (%rdi)
-; AVX1-NEXT:    retq
+; X87-LABEL: v_test_canonicalize_var_f32:
+; X87:       # %bb.0:
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    fld1
+; X87-NEXT:    fmuls (%eax)
+; X87-NEXT:    fstps (%eax)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: v_test_canonicalize_var_f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    mulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    movss %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v_test_canonicalize_var_f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vmulss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-NEXT:    retq
   %val = load float, float addrspace(1)* %out
   %canonicalized = call float @llvm.canonicalize.f32(float %val)
   store float %canonicalized, float addrspace(1)* %out
@@ -269,31 +281,30 @@ define void @v_test_canonicalize_var_f32(float addrspace(1)* %out) #1 {
 }
 
 define void @v_test_canonicalize_x86_fp80(x86_fp80 addrspace(1)* %out) #1 {
-; SSE1-LABEL: v_test_canonicalize_x86_fp80:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE1-NEXT:    fldt (%eax)
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmulp %st, %st(1)
-; SSE1-NEXT:    fstpt (%eax)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: v_test_canonicalize_x86_fp80:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    fldt (%rdi)
-; SSE2-NEXT:    fld1
-; SSE2-NEXT:    fmulp %st, %st(1)
-; SSE2-NEXT:    fstpt (%rdi)
-; SSE2-NEXT:    retq
-;
-; AVX1-LABEL: v_test_canonicalize_x86_fp80:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    fldt (%rdi)
-; AVX1-NEXT:    fld1
-; AVX1-NEXT:    fmulp %st, %st(1)
-; AVX1-NEXT:    fstpt (%rdi)
-; AVX1-NEXT:    retq
-
+; X87-LABEL: v_test_canonicalize_x86_fp80:
+; X87:       # %bb.0:
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    fldt (%eax)
+; X87-NEXT:    fld1
+; X87-NEXT:    fmulp %st, %st(1)
+; X87-NEXT:    fstpt (%eax)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: v_test_canonicalize_x86_fp80:
+; SSE:       # %bb.0:
+; SSE-NEXT:    fldt (%rdi)
+; SSE-NEXT:    fld1
+; SSE-NEXT:    fmulp %st, %st(1)
+; SSE-NEXT:    fstpt (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v_test_canonicalize_x86_fp80:
+; AVX:       # %bb.0:
+; AVX-NEXT:    fldt (%rdi)
+; AVX-NEXT:    fld1
+; AVX-NEXT:    fmulp %st, %st(1)
+; AVX-NEXT:    fstpt (%rdi)
+; AVX-NEXT:    retq
   %val = load x86_fp80, x86_fp80 addrspace(1)* %out
   %canonicalized = call x86_fp80 @llvm.canonicalize.f80(x86_fp80 %val)
   store x86_fp80 %canonicalized, x86_fp80 addrspace(1)* %out
@@ -301,28 +312,27 @@ define void @v_test_canonicalize_x86_fp80(x86_fp80 addrspace(1)* %out) #1 {
 }
 
 define void @v_test_canonicalize_var_f64(double addrspace(1)* %out) #1 {
-; SSE1-LABEL: v_test_canonicalize_var_f64:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmull (%eax)
-; SSE1-NEXT:    fstpl (%eax)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: v_test_canonicalize_var_f64:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    movsd %xmm0, (%rdi)
-; SSE2-NEXT:    retq
-;
-; AVX1-LABEL: v_test_canonicalize_var_f64:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vmovsd %xmm0, (%rdi)
-; AVX1-NEXT:    retq
-
+; X87-LABEL: v_test_canonicalize_var_f64:
+; X87:       # %bb.0:
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    fld1
+; X87-NEXT:    fmull (%eax)
+; X87-NEXT:    fstpl (%eax)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: v_test_canonicalize_var_f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-NEXT:    mulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    movsd %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: v_test_canonicalize_var_f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    vmulsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovsd %xmm0, (%rdi)
+; AVX-NEXT:    retq
   %val = load double, double addrspace(1)* %out
   %canonicalized = call double @llvm.canonicalize.f64(double %val)
   store double %canonicalized, double addrspace(1)* %out
@@ -330,55 +340,59 @@ define void @v_test_canonicalize_var_f64(double addrspace(1)* %out) #1 {
 }
 
 define void @canonicalize_undef(double addrspace(1)* %out) {
-; SSE1-LABEL: canonicalize_undef:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE1-NEXT:    movl $2146959360, 4(%eax) # imm = 0x7FF80000
-; SSE1-NEXT:    movl $0, (%eax)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: canonicalize_undef:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    movabsq $9221120237041090560, %rax # imm = 0x7FF8000000000000
-; SSE2-NEXT:    movq %rax, (%rdi)
-; SSE2-NEXT:    retq
-;
-; AVX1-LABEL: canonicalize_undef:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    movabsq $9221120237041090560, %rax # imm = 0x7FF8000000000000
-; AVX1-NEXT:    movq %rax, (%rdi)
-; AVX1-NEXT:    retq
-
+; X87-LABEL: canonicalize_undef:
+; X87:       # %bb.0:
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    movl $2146959360, 4(%eax) # imm = 0x7FF80000
+; X87-NEXT:    movl $0, (%eax)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: canonicalize_undef:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movabsq $9221120237041090560, %rax # imm = 0x7FF8000000000000
+; SSE-NEXT:    movq %rax, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: canonicalize_undef:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movabsq $9221120237041090560, %rax # imm = 0x7FF8000000000000
+; AVX-NEXT:    movq %rax, (%rdi)
+; AVX-NEXT:    retq
   %canonicalized = call double @llvm.canonicalize.f64(double undef)
   store double %canonicalized, double addrspace(1)* %out
   ret void
 }
 
 define <4 x float> @canon_fp32_varargsv4f32(<4 x float> %a) {
-; SSE1-LABEL: canon_fp32_varargsv4f32:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fld %st(0)
-; SSE1-NEXT:    fmuls {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fld %st(1)
-; SSE1-NEXT:    fmuls {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fld %st(2)
-; SSE1-NEXT:    fmuls {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fxch %st(3)
-; SSE1-NEXT:    fmuls {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fstps 12(%eax)
-; SSE1-NEXT:    fxch %st(2)
-; SSE1-NEXT:    fstps 8(%eax)
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:    fstps 4(%eax)
-; SSE1-NEXT:    fstps (%eax)
-; SSE1-NEXT:    retl $4
-;
-; SSE2-LABEL: canon_fp32_varargsv4f32:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    retq
+; X87-LABEL: canon_fp32_varargsv4f32:
+; X87:       # %bb.0:
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    fld1
+; X87-NEXT:    fld %st(0)
+; X87-NEXT:    fmuls {{[0-9]+}}(%esp)
+; X87-NEXT:    fld %st(1)
+; X87-NEXT:    fmuls {{[0-9]+}}(%esp)
+; X87-NEXT:    fld %st(2)
+; X87-NEXT:    fmuls {{[0-9]+}}(%esp)
+; X87-NEXT:    fxch %st(3)
+; X87-NEXT:    fmuls {{[0-9]+}}(%esp)
+; X87-NEXT:    fstps 12(%eax)
+; X87-NEXT:    fxch %st(2)
+; X87-NEXT:    fstps 8(%eax)
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:    fstps 4(%eax)
+; X87-NEXT:    fstps (%eax)
+; X87-NEXT:    retl $4
+;
+; SSE-LABEL: canon_fp32_varargsv4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: canon_fp32_varargsv4f32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: canon_fp32_varargsv4f32:
 ; AVX2:       # %bb.0:
@@ -396,32 +410,37 @@ define <4 x float> @canon_fp32_varargsv4f32(<4 x float> %a) {
 }
 
 define <4 x double> @canon_fp64_varargsv4f64(<4 x double> %a) {
-; SSE1-LABEL: canon_fp64_varargsv4f64:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fld %st(0)
-; SSE1-NEXT:    fmull {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fld %st(1)
-; SSE1-NEXT:    fmull {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fld %st(2)
-; SSE1-NEXT:    fmull {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fxch %st(3)
-; SSE1-NEXT:    fmull {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fstpl 24(%eax)
-; SSE1-NEXT:    fxch %st(2)
-; SSE1-NEXT:    fstpl 16(%eax)
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:    fstpl 8(%eax)
-; SSE1-NEXT:    fstpl (%eax)
-; SSE1-NEXT:    retl $4
-;
-; SSE2-LABEL: canon_fp64_varargsv4f64:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    movapd {{.*#+}} xmm2 = [1.0E+0,1.0E+0]
-; SSE2-NEXT:    mulpd %xmm2, %xmm0
-; SSE2-NEXT:    mulpd %xmm2, %xmm1
-; SSE2-NEXT:    retq
+; X87-LABEL: canon_fp64_varargsv4f64:
+; X87:       # %bb.0:
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    fld1
+; X87-NEXT:    fld %st(0)
+; X87-NEXT:    fmull {{[0-9]+}}(%esp)
+; X87-NEXT:    fld %st(1)
+; X87-NEXT:    fmull {{[0-9]+}}(%esp)
+; X87-NEXT:    fld %st(2)
+; X87-NEXT:    fmull {{[0-9]+}}(%esp)
+; X87-NEXT:    fxch %st(3)
+; X87-NEXT:    fmull {{[0-9]+}}(%esp)
+; X87-NEXT:    fstpl 24(%eax)
+; X87-NEXT:    fxch %st(2)
+; X87-NEXT:    fstpl 16(%eax)
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:    fstpl 8(%eax)
+; X87-NEXT:    fstpl (%eax)
+; X87-NEXT:    retl $4
+;
+; SSE-LABEL: canon_fp64_varargsv4f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd {{.*#+}} xmm2 = [1.0E+0,1.0E+0]
+; SSE-NEXT:    mulpd %xmm2, %xmm0
+; SSE-NEXT:    mulpd %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: canon_fp64_varargsv4f64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: canon_fp64_varargsv4f64:
 ; AVX2:       # %bb.0:
@@ -439,66 +458,73 @@ define <4 x double> @canon_fp64_varargsv4f64(<4 x double> %a) {
 }
 
 define <2 x x86_fp80> @canon_fp80_varargsv2fp80(<2 x x86_fp80> %a) {
-; SSE1-LABEL: canon_fp80_varargsv2fp80:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    fldt {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fldt {{[0-9]+}}(%esp)
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmul %st, %st(1)
-; SSE1-NEXT:    fmulp %st, %st(2)
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: canon_fp80_varargsv2fp80:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    fldt {{[0-9]+}}(%rsp)
-; SSE2-NEXT:    fldt {{[0-9]+}}(%rsp)
-; SSE2-NEXT:    fld1
-; SSE2-NEXT:    fmul %st, %st(1)
-; SSE2-NEXT:    fmulp %st, %st(2)
-; SSE2-NEXT:    fxch %st(1)
-; SSE2-NEXT:    retq
-;
-; AVX1-LABEL: canon_fp80_varargsv2fp80:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    fldt {{[0-9]+}}(%rsp)
-; AVX1-NEXT:    fldt {{[0-9]+}}(%rsp)
-; AVX1-NEXT:    fld1
-; AVX1-NEXT:    fmul %st, %st(1)
-; AVX1-NEXT:    fmulp %st, %st(2)
-; AVX1-NEXT:    fxch %st(1)
-; AVX1-NEXT:    retq
+; X87-LABEL: canon_fp80_varargsv2fp80:
+; X87:       # %bb.0:
+; X87-NEXT:    fldt {{[0-9]+}}(%esp)
+; X87-NEXT:    fldt {{[0-9]+}}(%esp)
+; X87-NEXT:    fld1
+; X87-NEXT:    fmul %st, %st(1)
+; X87-NEXT:    fmulp %st, %st(2)
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: canon_fp80_varargsv2fp80:
+; SSE:       # %bb.0:
+; SSE-NEXT:    fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT:    fldt {{[0-9]+}}(%rsp)
+; SSE-NEXT:    fld1
+; SSE-NEXT:    fmul %st, %st(1)
+; SSE-NEXT:    fmulp %st, %st(2)
+; SSE-NEXT:    fxch %st(1)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: canon_fp80_varargsv2fp80:
+; AVX:       # %bb.0:
+; AVX-NEXT:    fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT:    fldt {{[0-9]+}}(%rsp)
+; AVX-NEXT:    fld1
+; AVX-NEXT:    fmul %st, %st(1)
+; AVX-NEXT:    fmulp %st, %st(2)
+; AVX-NEXT:    fxch %st(1)
+; AVX-NEXT:    retq
   %canonicalized = call <2 x x86_fp80> @llvm.canonicalize.v2f80(<2 x x86_fp80> %a)
   ret <2 x x86_fp80> %canonicalized
 }
 
 define void @vec_canonicalize_var_v4f32(<4 x float> addrspace(1)* %out) #1 {
-; SSE1-LABEL: vec_canonicalize_var_v4f32:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fld %st(0)
-; SSE1-NEXT:    fmuls (%eax)
-; SSE1-NEXT:    fld %st(1)
-; SSE1-NEXT:    fmuls 4(%eax)
-; SSE1-NEXT:    fld %st(2)
-; SSE1-NEXT:    fmuls 8(%eax)
-; SSE1-NEXT:    fxch %st(3)
-; SSE1-NEXT:    fmuls 12(%eax)
-; SSE1-NEXT:    fstps 12(%eax)
-; SSE1-NEXT:    fxch %st(2)
-; SSE1-NEXT:    fstps 8(%eax)
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:    fstps 4(%eax)
-; SSE1-NEXT:    fstps (%eax)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: vec_canonicalize_var_v4f32:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    movaps (%rdi), %xmm0
-; SSE2-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT:    movaps %xmm0, (%rdi)
-; SSE2-NEXT:    retq
+; X87-LABEL: vec_canonicalize_var_v4f32:
+; X87:       # %bb.0:
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    fld1
+; X87-NEXT:    fld %st(0)
+; X87-NEXT:    fmuls (%eax)
+; X87-NEXT:    fld %st(1)
+; X87-NEXT:    fmuls 4(%eax)
+; X87-NEXT:    fld %st(2)
+; X87-NEXT:    fmuls 8(%eax)
+; X87-NEXT:    fxch %st(3)
+; X87-NEXT:    fmuls 12(%eax)
+; X87-NEXT:    fstps 12(%eax)
+; X87-NEXT:    fxch %st(2)
+; X87-NEXT:    fstps 8(%eax)
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:    fstps 4(%eax)
+; X87-NEXT:    fstps (%eax)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: vec_canonicalize_var_v4f32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps (%rdi), %xmm0
+; SSE-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    movaps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vec_canonicalize_var_v4f32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovaps (%rdi), %xmm0
+; AVX1-NEXT:    vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vmovaps %xmm0, (%rdi)
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec_canonicalize_var_v4f32:
 ; AVX2:       # %bb.0:
@@ -520,35 +546,43 @@ define void @vec_canonicalize_var_v4f32(<4 x float> addrspace(1)* %out) #1 {
 }
 
 define void @vec_canonicalize_var_v4f64(<4 x double> addrspace(1)* %out) #1 {
-; SSE1-LABEL: vec_canonicalize_var_v4f64:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fld %st(0)
-; SSE1-NEXT:    fmull (%eax)
-; SSE1-NEXT:    fld %st(1)
-; SSE1-NEXT:    fmull 8(%eax)
-; SSE1-NEXT:    fld %st(2)
-; SSE1-NEXT:    fmull 16(%eax)
-; SSE1-NEXT:    fxch %st(3)
-; SSE1-NEXT:    fmull 24(%eax)
-; SSE1-NEXT:    fstpl 24(%eax)
-; SSE1-NEXT:    fxch %st(2)
-; SSE1-NEXT:    fstpl 16(%eax)
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:    fstpl 8(%eax)
-; SSE1-NEXT:    fstpl (%eax)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: vec_canonicalize_var_v4f64:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,1.0E+0]
-; SSE2-NEXT:    movapd 16(%rdi), %xmm1
-; SSE2-NEXT:    mulpd %xmm0, %xmm1
-; SSE2-NEXT:    mulpd (%rdi), %xmm0
-; SSE2-NEXT:    movapd %xmm0, (%rdi)
-; SSE2-NEXT:    movapd %xmm1, 16(%rdi)
-; SSE2-NEXT:    retq
+; X87-LABEL: vec_canonicalize_var_v4f64:
+; X87:       # %bb.0:
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    fld1
+; X87-NEXT:    fld %st(0)
+; X87-NEXT:    fmull (%eax)
+; X87-NEXT:    fld %st(1)
+; X87-NEXT:    fmull 8(%eax)
+; X87-NEXT:    fld %st(2)
+; X87-NEXT:    fmull 16(%eax)
+; X87-NEXT:    fxch %st(3)
+; X87-NEXT:    fmull 24(%eax)
+; X87-NEXT:    fstpl 24(%eax)
+; X87-NEXT:    fxch %st(2)
+; X87-NEXT:    fstpl 16(%eax)
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:    fstpl 8(%eax)
+; X87-NEXT:    fstpl (%eax)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: vec_canonicalize_var_v4f64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd {{.*#+}} xmm0 = [1.0E+0,1.0E+0]
+; SSE-NEXT:    movapd 16(%rdi), %xmm1
+; SSE-NEXT:    mulpd %xmm0, %xmm1
+; SSE-NEXT:    mulpd (%rdi), %xmm0
+; SSE-NEXT:    movapd %xmm0, (%rdi)
+; SSE-NEXT:    movapd %xmm1, 16(%rdi)
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: vec_canonicalize_var_v4f64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovapd (%rdi), %ymm0
+; AVX1-NEXT:    vmulpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT:    vmovapd %ymm0, (%rdi)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec_canonicalize_var_v4f64:
 ; AVX2:       # %bb.0:
@@ -572,65 +606,67 @@ define void @vec_canonicalize_var_v4f64(<4 x double> addrspace(1)* %out) #1 {
 }
 
 define void @vec_canonicalize_x86_fp80(<4 x x86_fp80> addrspace(1)* %out) #1 {
-; SSE1-LABEL: vec_canonicalize_x86_fp80:
-; SSE1:       # %bb.0:
-; SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; SSE1-NEXT:    fldt 30(%eax)
-; SSE1-NEXT:    fldt 20(%eax)
-; SSE1-NEXT:    fldt 10(%eax)
-; SSE1-NEXT:    fldt (%eax)
-; SSE1-NEXT:    fld1
-; SSE1-NEXT:    fmul %st, %st(1)
-; SSE1-NEXT:    fmul %st, %st(2)
-; SSE1-NEXT:    fmul %st, %st(3)
-; SSE1-NEXT:    fmulp %st, %st(4)
-; SSE1-NEXT:    fxch %st(3)
-; SSE1-NEXT:    fstpt 30(%eax)
-; SSE1-NEXT:    fxch %st(1)
-; SSE1-NEXT:    fstpt 20(%eax)
-; SSE1-NEXT:    fstpt 10(%eax)
-; SSE1-NEXT:    fstpt (%eax)
-; SSE1-NEXT:    retl
-;
-; SSE2-LABEL: vec_canonicalize_x86_fp80:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    fldt 30(%rdi)
-; SSE2-NEXT:    fldt 20(%rdi)
-; SSE2-NEXT:    fldt 10(%rdi)
-; SSE2-NEXT:    fldt (%rdi)
-; SSE2-NEXT:    fld1
-; SSE2-NEXT:    fmul %st, %st(1)
-; SSE2-NEXT:    fmul %st, %st(2)
-; SSE2-NEXT:    fmul %st, %st(3)
-; SSE2-NEXT:    fmulp %st, %st(4)
-; SSE2-NEXT:    fxch %st(3)
-; SSE2-NEXT:    fstpt 30(%rdi)
-; SSE2-NEXT:    fxch %st(1)
-; SSE2-NEXT:    fstpt 20(%rdi)
-; SSE2-NEXT:    fstpt 10(%rdi)
-; SSE2-NEXT:    fstpt (%rdi)
-; SSE2-NEXT:    retq
-;
-; AVX1-LABEL: vec_canonicalize_x86_fp80:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    fldt 30(%rdi)
-; AVX1-NEXT:    fldt 20(%rdi)
-; AVX1-NEXT:    fldt 10(%rdi)
-; AVX1-NEXT:    fldt (%rdi)
-; AVX1-NEXT:    fld1
-; AVX1-NEXT:    fmul %st, %st(1)
-; AVX1-NEXT:    fmul %st, %st(2)
-; AVX1-NEXT:    fmul %st, %st(3)
-; AVX1-NEXT:    fmulp %st, %st(4)
-; AVX1-NEXT:    fxch %st(3)
-; AVX1-NEXT:    fstpt 30(%rdi)
-; AVX1-NEXT:    fxch %st(1)
-; AVX1-NEXT:    fstpt 20(%rdi)
-; AVX1-NEXT:    fstpt 10(%rdi)
-; AVX1-NEXT:    fstpt (%rdi)
-; AVX1-NEXT:    retq
+; X87-LABEL: vec_canonicalize_x86_fp80:
+; X87:       # %bb.0:
+; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X87-NEXT:    fldt 30(%eax)
+; X87-NEXT:    fldt 20(%eax)
+; X87-NEXT:    fldt 10(%eax)
+; X87-NEXT:    fldt (%eax)
+; X87-NEXT:    fld1
+; X87-NEXT:    fmul %st, %st(1)
+; X87-NEXT:    fmul %st, %st(2)
+; X87-NEXT:    fmul %st, %st(3)
+; X87-NEXT:    fmulp %st, %st(4)
+; X87-NEXT:    fxch %st(3)
+; X87-NEXT:    fstpt 30(%eax)
+; X87-NEXT:    fxch %st(1)
+; X87-NEXT:    fstpt 20(%eax)
+; X87-NEXT:    fstpt 10(%eax)
+; X87-NEXT:    fstpt (%eax)
+; X87-NEXT:    retl
+;
+; SSE-LABEL: vec_canonicalize_x86_fp80:
+; SSE:       # %bb.0:
+; SSE-NEXT:    fldt 30(%rdi)
+; SSE-NEXT:    fldt 20(%rdi)
+; SSE-NEXT:    fldt 10(%rdi)
+; SSE-NEXT:    fldt (%rdi)
+; SSE-NEXT:    fld1
+; SSE-NEXT:    fmul %st, %st(1)
+; SSE-NEXT:    fmul %st, %st(2)
+; SSE-NEXT:    fmul %st, %st(3)
+; SSE-NEXT:    fmulp %st, %st(4)
+; SSE-NEXT:    fxch %st(3)
+; SSE-NEXT:    fstpt 30(%rdi)
+; SSE-NEXT:    fxch %st(1)
+; SSE-NEXT:    fstpt 20(%rdi)
+; SSE-NEXT:    fstpt 10(%rdi)
+; SSE-NEXT:    fstpt (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: vec_canonicalize_x86_fp80:
+; AVX:       # %bb.0:
+; AVX-NEXT:    fldt 30(%rdi)
+; AVX-NEXT:    fldt 20(%rdi)
+; AVX-NEXT:    fldt 10(%rdi)
+; AVX-NEXT:    fldt (%rdi)
+; AVX-NEXT:    fld1
+; AVX-NEXT:    fmul %st, %st(1)
+; AVX-NEXT:    fmul %st, %st(2)
+; AVX-NEXT:    fmul %st, %st(3)
+; AVX-NEXT:    fmulp %st, %st(4)
+; AVX-NEXT:    fxch %st(3)
+; AVX-NEXT:    fstpt 30(%rdi)
+; AVX-NEXT:    fxch %st(1)
+; AVX-NEXT:    fstpt 20(%rdi)
+; AVX-NEXT:    fstpt 10(%rdi)
+; AVX-NEXT:    fstpt (%rdi)
+; AVX-NEXT:    retq
   %val = load <4 x x86_fp80>, <4 x x86_fp80> addrspace(1)* %out
   %canonicalized = call <4 x x86_fp80> @llvm.canonicalize.f80(<4 x x86_fp80> %val)
   store <4 x x86_fp80> %canonicalized, <4 x x86_fp80> addrspace(1)* %out
   ret void
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; SSE2: {{.*}}
