[llvm] 0cc12b8 - [X86] Remove regcall calling convention from fp-strict-scalar.ll. Add 32-bit and 64-bit check prefixes.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 21 16:33:59 PST 2019


Author: Craig Topper
Date: 2019-11-21T16:18:55-08:00
New Revision: 0cc12b8a83100027b16e1dc5cec079ba437ca3e0

URL: https://github.com/llvm/llvm-project/commit/0cc12b8a83100027b16e1dc5cec079ba437ca3e0
DIFF: https://github.com/llvm/llvm-project/commit/0cc12b8a83100027b16e1dc5cec079ba437ca3e0.diff

LOG: [X86] Remove regcall calling convention from fp-strict-scalar.ll. Add 32-bit and 64-bit check prefixes.

The regcall convention was making 32-bit mode pass the arguments in xmm
registers, which kept the 32-bit and 64-bit output similar enough to
share check lines. But I have upcoming patches that require the two to
be checked separately anyway.
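
For reference, the CHECK lines below were regenerated with the script
named in the test's NOTE line. A typical invocation (assuming an llc
built locally is on PATH) would be:

    llvm/utils/update_llc_test_checks.py \
        llvm/test/CodeGen/X86/fp-strict-scalar.ll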

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/fp-strict-scalar.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/fp-strict-scalar.ll b/llvm/test/CodeGen/X86/fp-strict-scalar.ll
index f54b3ac67306..dcbcc9377d19 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE,SSE-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE,SSE-X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-X64
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=X87
 
 declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
@@ -16,16 +16,44 @@ declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, me
 declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
 declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
 
-define x86_regcallcc double @f1(double %a, double %b) #0 {
-; SSE-LABEL: f1:
-; SSE:       # %bb.0:
-; SSE-NEXT:    addsd %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
+define double @f1(double %a, double %b) nounwind strictfp {
+; SSE-X86-LABEL: f1:
+; SSE-X86:       # %bb.0:
+; SSE-X86-NEXT:    pushl %ebp
+; SSE-X86-NEXT:    movl %esp, %ebp
+; SSE-X86-NEXT:    andl $-8, %esp
+; SSE-X86-NEXT:    subl $8, %esp
+; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-X86-NEXT:    addsd 16(%ebp), %xmm0
+; SSE-X86-NEXT:    movsd %xmm0, (%esp)
+; SSE-X86-NEXT:    fldl (%esp)
+; SSE-X86-NEXT:    movl %ebp, %esp
+; SSE-X86-NEXT:    popl %ebp
+; SSE-X86-NEXT:    retl
 ;
-; AVX-LABEL: f1:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+; SSE-X64-LABEL: f1:
+; SSE-X64:       # %bb.0:
+; SSE-X64-NEXT:    addsd %xmm1, %xmm0
+; SSE-X64-NEXT:    retq
+;
+; AVX-X86-LABEL: f1:
+; AVX-X86:       # %bb.0:
+; AVX-X86-NEXT:    pushl %ebp
+; AVX-X86-NEXT:    movl %esp, %ebp
+; AVX-X86-NEXT:    andl $-8, %esp
+; AVX-X86-NEXT:    subl $8, %esp
+; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-X86-NEXT:    vaddsd 16(%ebp), %xmm0, %xmm0
+; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
+; AVX-X86-NEXT:    fldl (%esp)
+; AVX-X86-NEXT:    movl %ebp, %esp
+; AVX-X86-NEXT:    popl %ebp
+; AVX-X86-NEXT:    retl
+;
+; AVX-X64-LABEL: f1:
+; AVX-X64:       # %bb.0:
+; AVX-X64-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-X64-NEXT:    retq
 ;
 ; X87-LABEL: f1:
 ; X87:       # %bb.0:
@@ -38,16 +66,36 @@ define x86_regcallcc double @f1(double %a, double %b) #0 {
   ret double %ret
 }
 
-define x86_regcallcc float @f2(float %a, float %b) #0 {
-; SSE-LABEL: f2:
-; SSE:       # %bb.0:
-; SSE-NEXT:    addss %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
+define float @f2(float %a, float %b) nounwind strictfp {
+; SSE-X86-LABEL: f2:
+; SSE-X86:       # %bb.0:
+; SSE-X86-NEXT:    pushl %eax
+; SSE-X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-X86-NEXT:    addss {{[0-9]+}}(%esp), %xmm0
+; SSE-X86-NEXT:    movss %xmm0, (%esp)
+; SSE-X86-NEXT:    flds (%esp)
+; SSE-X86-NEXT:    popl %eax
+; SSE-X86-NEXT:    retl
+;
+; SSE-X64-LABEL: f2:
+; SSE-X64:       # %bb.0:
+; SSE-X64-NEXT:    addss %xmm1, %xmm0
+; SSE-X64-NEXT:    retq
 ;
-; AVX-LABEL: f2:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+; AVX-X86-LABEL: f2:
+; AVX-X86:       # %bb.0:
+; AVX-X86-NEXT:    pushl %eax
+; AVX-X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-X86-NEXT:    vaddss {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
+; AVX-X86-NEXT:    flds (%esp)
+; AVX-X86-NEXT:    popl %eax
+; AVX-X86-NEXT:    retl
+;
+; AVX-X64-LABEL: f2:
+; AVX-X64:       # %bb.0:
+; AVX-X64-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-X64-NEXT:    retq
 ;
 ; X87-LABEL: f2:
 ; X87:       # %bb.0:
@@ -60,16 +108,44 @@ define x86_regcallcc float @f2(float %a, float %b) #0 {
   ret float %ret
 }
 
-define x86_regcallcc double @f3(double %a, double %b) #0 {
-; SSE-LABEL: f3:
-; SSE:       # %bb.0:
-; SSE-NEXT:    subsd %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
+define double @f3(double %a, double %b) nounwind strictfp {
+; SSE-X86-LABEL: f3:
+; SSE-X86:       # %bb.0:
+; SSE-X86-NEXT:    pushl %ebp
+; SSE-X86-NEXT:    movl %esp, %ebp
+; SSE-X86-NEXT:    andl $-8, %esp
+; SSE-X86-NEXT:    subl $8, %esp
+; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-X86-NEXT:    subsd 16(%ebp), %xmm0
+; SSE-X86-NEXT:    movsd %xmm0, (%esp)
+; SSE-X86-NEXT:    fldl (%esp)
+; SSE-X86-NEXT:    movl %ebp, %esp
+; SSE-X86-NEXT:    popl %ebp
+; SSE-X86-NEXT:    retl
+;
+; SSE-X64-LABEL: f3:
+; SSE-X64:       # %bb.0:
+; SSE-X64-NEXT:    subsd %xmm1, %xmm0
+; SSE-X64-NEXT:    retq
 ;
-; AVX-LABEL: f3:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+; AVX-X86-LABEL: f3:
+; AVX-X86:       # %bb.0:
+; AVX-X86-NEXT:    pushl %ebp
+; AVX-X86-NEXT:    movl %esp, %ebp
+; AVX-X86-NEXT:    andl $-8, %esp
+; AVX-X86-NEXT:    subl $8, %esp
+; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-X86-NEXT:    vsubsd 16(%ebp), %xmm0, %xmm0
+; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
+; AVX-X86-NEXT:    fldl (%esp)
+; AVX-X86-NEXT:    movl %ebp, %esp
+; AVX-X86-NEXT:    popl %ebp
+; AVX-X86-NEXT:    retl
+;
+; AVX-X64-LABEL: f3:
+; AVX-X64:       # %bb.0:
+; AVX-X64-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-X64-NEXT:    retq
 ;
 ; X87-LABEL: f3:
 ; X87:       # %bb.0:
@@ -82,16 +158,36 @@ define x86_regcallcc double @f3(double %a, double %b) #0 {
   ret double %ret
 }
 
-define x86_regcallcc float @f4(float %a, float %b) #0 {
-; SSE-LABEL: f4:
-; SSE:       # %bb.0:
-; SSE-NEXT:    subss %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
+define float @f4(float %a, float %b) nounwind strictfp {
+; SSE-X86-LABEL: f4:
+; SSE-X86:       # %bb.0:
+; SSE-X86-NEXT:    pushl %eax
+; SSE-X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-X86-NEXT:    subss {{[0-9]+}}(%esp), %xmm0
+; SSE-X86-NEXT:    movss %xmm0, (%esp)
+; SSE-X86-NEXT:    flds (%esp)
+; SSE-X86-NEXT:    popl %eax
+; SSE-X86-NEXT:    retl
+;
+; SSE-X64-LABEL: f4:
+; SSE-X64:       # %bb.0:
+; SSE-X64-NEXT:    subss %xmm1, %xmm0
+; SSE-X64-NEXT:    retq
 ;
-; AVX-LABEL: f4:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+; AVX-X86-LABEL: f4:
+; AVX-X86:       # %bb.0:
+; AVX-X86-NEXT:    pushl %eax
+; AVX-X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-X86-NEXT:    vsubss {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
+; AVX-X86-NEXT:    flds (%esp)
+; AVX-X86-NEXT:    popl %eax
+; AVX-X86-NEXT:    retl
+;
+; AVX-X64-LABEL: f4:
+; AVX-X64:       # %bb.0:
+; AVX-X64-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-X64-NEXT:    retq
 ;
 ; X87-LABEL: f4:
 ; X87:       # %bb.0:
@@ -104,16 +200,44 @@ define x86_regcallcc float @f4(float %a, float %b) #0 {
   ret float %ret
 }
 
-define x86_regcallcc double @f5(double %a, double %b) #0 {
-; SSE-LABEL: f5:
-; SSE:       # %bb.0:
-; SSE-NEXT:    mulsd %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
+define double @f5(double %a, double %b) nounwind strictfp {
+; SSE-X86-LABEL: f5:
+; SSE-X86:       # %bb.0:
+; SSE-X86-NEXT:    pushl %ebp
+; SSE-X86-NEXT:    movl %esp, %ebp
+; SSE-X86-NEXT:    andl $-8, %esp
+; SSE-X86-NEXT:    subl $8, %esp
+; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-X86-NEXT:    mulsd 16(%ebp), %xmm0
+; SSE-X86-NEXT:    movsd %xmm0, (%esp)
+; SSE-X86-NEXT:    fldl (%esp)
+; SSE-X86-NEXT:    movl %ebp, %esp
+; SSE-X86-NEXT:    popl %ebp
+; SSE-X86-NEXT:    retl
+;
+; SSE-X64-LABEL: f5:
+; SSE-X64:       # %bb.0:
+; SSE-X64-NEXT:    mulsd %xmm1, %xmm0
+; SSE-X64-NEXT:    retq
 ;
-; AVX-LABEL: f5:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+; AVX-X86-LABEL: f5:
+; AVX-X86:       # %bb.0:
+; AVX-X86-NEXT:    pushl %ebp
+; AVX-X86-NEXT:    movl %esp, %ebp
+; AVX-X86-NEXT:    andl $-8, %esp
+; AVX-X86-NEXT:    subl $8, %esp
+; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-X86-NEXT:    vmulsd 16(%ebp), %xmm0, %xmm0
+; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
+; AVX-X86-NEXT:    fldl (%esp)
+; AVX-X86-NEXT:    movl %ebp, %esp
+; AVX-X86-NEXT:    popl %ebp
+; AVX-X86-NEXT:    retl
+;
+; AVX-X64-LABEL: f5:
+; AVX-X64:       # %bb.0:
+; AVX-X64-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
+; AVX-X64-NEXT:    retq
 ;
 ; X87-LABEL: f5:
 ; X87:       # %bb.0:
@@ -126,16 +250,36 @@ define x86_regcallcc double @f5(double %a, double %b) #0 {
   ret double %ret
 }
 
-define x86_regcallcc float @f6(float %a, float %b) #0 {
-; SSE-LABEL: f6:
-; SSE:       # %bb.0:
-; SSE-NEXT:    mulss %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
+define float @f6(float %a, float %b) nounwind strictfp {
+; SSE-X86-LABEL: f6:
+; SSE-X86:       # %bb.0:
+; SSE-X86-NEXT:    pushl %eax
+; SSE-X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-X86-NEXT:    mulss {{[0-9]+}}(%esp), %xmm0
+; SSE-X86-NEXT:    movss %xmm0, (%esp)
+; SSE-X86-NEXT:    flds (%esp)
+; SSE-X86-NEXT:    popl %eax
+; SSE-X86-NEXT:    retl
+;
+; SSE-X64-LABEL: f6:
+; SSE-X64:       # %bb.0:
+; SSE-X64-NEXT:    mulss %xmm1, %xmm0
+; SSE-X64-NEXT:    retq
 ;
-; AVX-LABEL: f6:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+; AVX-X86-LABEL: f6:
+; AVX-X86:       # %bb.0:
+; AVX-X86-NEXT:    pushl %eax
+; AVX-X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-X86-NEXT:    vmulss {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
+; AVX-X86-NEXT:    flds (%esp)
+; AVX-X86-NEXT:    popl %eax
+; AVX-X86-NEXT:    retl
+;
+; AVX-X64-LABEL: f6:
+; AVX-X64:       # %bb.0:
+; AVX-X64-NEXT:    vmulss %xmm1, %xmm0, %xmm0
+; AVX-X64-NEXT:    retq
 ;
 ; X87-LABEL: f6:
 ; X87:       # %bb.0:
@@ -148,16 +292,44 @@ define x86_regcallcc float @f6(float %a, float %b) #0 {
   ret float %ret
 }
 
-define x86_regcallcc double @f7(double %a, double %b) #0 {
-; SSE-LABEL: f7:
-; SSE:       # %bb.0:
-; SSE-NEXT:    divsd %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
+define double @f7(double %a, double %b) nounwind strictfp {
+; SSE-X86-LABEL: f7:
+; SSE-X86:       # %bb.0:
+; SSE-X86-NEXT:    pushl %ebp
+; SSE-X86-NEXT:    movl %esp, %ebp
+; SSE-X86-NEXT:    andl $-8, %esp
+; SSE-X86-NEXT:    subl $8, %esp
+; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE-X86-NEXT:    divsd 16(%ebp), %xmm0
+; SSE-X86-NEXT:    movsd %xmm0, (%esp)
+; SSE-X86-NEXT:    fldl (%esp)
+; SSE-X86-NEXT:    movl %ebp, %esp
+; SSE-X86-NEXT:    popl %ebp
+; SSE-X86-NEXT:    retl
+;
+; SSE-X64-LABEL: f7:
+; SSE-X64:       # %bb.0:
+; SSE-X64-NEXT:    divsd %xmm1, %xmm0
+; SSE-X64-NEXT:    retq
 ;
-; AVX-LABEL: f7:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+; AVX-X86-LABEL: f7:
+; AVX-X86:       # %bb.0:
+; AVX-X86-NEXT:    pushl %ebp
+; AVX-X86-NEXT:    movl %esp, %ebp
+; AVX-X86-NEXT:    andl $-8, %esp
+; AVX-X86-NEXT:    subl $8, %esp
+; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-X86-NEXT:    vdivsd 16(%ebp), %xmm0, %xmm0
+; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
+; AVX-X86-NEXT:    fldl (%esp)
+; AVX-X86-NEXT:    movl %ebp, %esp
+; AVX-X86-NEXT:    popl %ebp
+; AVX-X86-NEXT:    retl
+;
+; AVX-X64-LABEL: f7:
+; AVX-X64:       # %bb.0:
+; AVX-X64-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
+; AVX-X64-NEXT:    retq
 ;
 ; X87-LABEL: f7:
 ; X87:       # %bb.0:
@@ -170,16 +342,36 @@ define x86_regcallcc double @f7(double %a, double %b) #0 {
   ret double %ret
 }
 
-define x86_regcallcc float @f8(float %a, float %b) #0 {
-; SSE-LABEL: f8:
-; SSE:       # %bb.0:
-; SSE-NEXT:    divss %xmm1, %xmm0
-; SSE-NEXT:    ret{{[l|q]}}
-;
-; AVX-LABEL: f8:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    ret{{[l|q]}}
+define float @f8(float %a, float %b) nounwind strictfp {
+; SSE-X86-LABEL: f8:
+; SSE-X86:       # %bb.0:
+; SSE-X86-NEXT:    pushl %eax
+; SSE-X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-X86-NEXT:    divss {{[0-9]+}}(%esp), %xmm0
+; SSE-X86-NEXT:    movss %xmm0, (%esp)
+; SSE-X86-NEXT:    flds (%esp)
+; SSE-X86-NEXT:    popl %eax
+; SSE-X86-NEXT:    retl
+;
+; SSE-X64-LABEL: f8:
+; SSE-X64:       # %bb.0:
+; SSE-X64-NEXT:    divss %xmm1, %xmm0
+; SSE-X64-NEXT:    retq
+;
+; AVX-X86-LABEL: f8:
+; AVX-X86:       # %bb.0:
+; AVX-X86-NEXT:    pushl %eax
+; AVX-X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-X86-NEXT:    vdivss {{[0-9]+}}(%esp), %xmm0, %xmm0
+; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
+; AVX-X86-NEXT:    flds (%esp)
+; AVX-X86-NEXT:    popl %eax
+; AVX-X86-NEXT:    retl
+;
+; AVX-X64-LABEL: f8:
+; AVX-X64:       # %bb.0:
+; AVX-X64-NEXT:    vdivss %xmm1, %xmm0, %xmm0
+; AVX-X64-NEXT:    retq
 ;
 ; X87-LABEL: f8:
 ; X87:       # %bb.0:
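
The calling-convention difference is easy to reproduce standalone. A
minimal sketch (repro.ll is a hypothetical file name; this assumes a
built llc), using @f1 from the test above:

    ; repro.ll: strict scalar fadd with the default C calling convention
    declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)

    define double @f1(double %a, double %b) nounwind strictfp {
      %ret = call double @llvm.experimental.constrained.fadd.f64(
                   double %a, double %b,
                   metadata !"round.dynamic",
                   metadata !"fpexcept.strict") strictfp
      ret double %ret
    }

    # 32-bit: %a and %b are passed on the stack, result returned on the x87 stack
    llc < repro.ll -mtriple=i686-unknown-unknown -mattr=+sse2 -O3

    # 64-bit: %a and %b arrive in %xmm0/%xmm1, result returned in %xmm0
    llc < repro.ll -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3

With regcall the 32-bit run also took the arguments in xmm registers,
which is why the old SSE/AVX prefixes could cover both triples with a
shared ret{{[l|q]}} pattern.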


        

